1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5
6 //
7 // #Overview
8 //
9 // GC automatically manages memory allocated by managed code.
10 // The design doc for GC can be found at Documentation/botr/garbage-collection.md
11 //
12 // This file includes both the code for GC and the allocator. The most common
13 // case for a GC to be triggered is from the allocator code. See
14 // code:#try_allocate_more_space where it calls GarbageCollectGeneration.
15 //
16 // Entry points for the allocator are GCHeap::Alloc* which are called by the
17 // allocation helpers in gcscan.cpp
18 //
19
20 #include "gcpriv.h"
21 #include "softwarewritewatch.h"
22
23 #define USE_INTROSORT
24
25 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
26 BOOL bgc_heap_walk_for_etw_p = FALSE;
27 #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
28
29 #if defined(FEATURE_REDHAWK)
30 #define MAYBE_UNUSED_VAR(v) v = v
31 #else
32 #define MAYBE_UNUSED_VAR(v)
33 #endif // FEATURE_REDHAWK
34
35 #define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
36
37 #ifdef SERVER_GC
38 #define partial_size_th 100
39 #define num_partial_refs 64
40 #else //SERVER_GC
41 #define partial_size_th 100
42 #define num_partial_refs 32
43 #endif //SERVER_GC
44
45 #define demotion_plug_len_th (6*1024*1024)
46
47 #ifdef BIT64
48 #define MARK_STACK_INITIAL_LENGTH 1024
49 #else
50 #define MARK_STACK_INITIAL_LENGTH 128
51 #endif // BIT64
52
53 #define LOH_PIN_QUEUE_LENGTH 100
54 #define LOH_PIN_DECAY 10
55
56 #ifdef BIT64
57 // Right now we support a maximum of 1024 procs - meaning that we will create at most
58 // that many GC threads and GC heaps. 
59 #define MAX_SUPPORTED_CPUS 1024
60 #else
61 #define MAX_SUPPORTED_CPUS 64
62 #endif // BIT64
63
64 #ifdef GC_CONFIG_DRIVEN
65 int compact_ratio = 0;
66 #endif //GC_CONFIG_DRIVEN
67
68 // See comments in reset_memory.
69 BOOL reset_mm_p = TRUE;
70
71 bool g_fFinalizerRunOnShutDown = false;
72
73 #ifdef FEATURE_SVR_GC
74 bool g_built_with_svr_gc = true;
75 #else
76 bool g_built_with_svr_gc = false;
77 #endif // FEATURE_SVR_GC
78
79 #if defined(BUILDENV_DEBUG)
80 uint8_t g_build_variant = 0;
81 #elif defined(BUILDENV_CHECKED)
82 uint8_t g_build_variant = 1;
83 #else
84 uint8_t g_build_variant = 2;
85 #endif // defined(BUILDENV_DEBUG)
86
87 VOLATILE(int32_t) g_no_gc_lock = -1;
88
89 #if defined (TRACE_GC) && !defined (DACCESS_COMPILE)
90 const char * const allocation_state_str[] = {
91     "start",
92     "can_allocate",
93     "cant_allocate",
94     "try_fit",
95     "try_fit_new_seg",
96     "try_fit_new_seg_after_cg",
97     "try_fit_no_seg",
98     "try_fit_after_cg",
99     "try_fit_after_bgc",
100     "try_free_full_seg_in_bgc", 
101     "try_free_after_bgc",
102     "try_seg_end",
103     "acquire_seg",
104     "acquire_seg_after_cg",
105     "acquire_seg_after_bgc",
106     "check_and_wait_for_bgc",
107     "trigger_full_compact_gc",
108     "trigger_ephemeral_gc",
109     "trigger_2nd_ephemeral_gc",
110     "check_retry_seg"
111 };
112 #endif //TRACE_GC && !DACCESS_COMPILE
113
114 // Keep this in sync with the definition of gc_reason
115 #if (defined(DT_LOG) || defined(TRACE_GC)) && !defined (DACCESS_COMPILE)
116 static const char* const str_gc_reasons[] = 
117 {
118     "alloc_soh",
119     "induced",
120     "lowmem",
121     "empty",
122     "alloc_loh",
123     "oos_soh",
124     "oos_loh",
125     "induced_noforce",
126     "gcstress",
127     "induced_lowmem",
128     "induced_compacting"
129 };
130
131 static const char* const str_gc_pause_modes[] = 
132 {
133     "batch",
134     "interactive",
135     "low_latency",
136     "sustained_low_latency",
137     "no_gc"
138 };
139 #endif // (DT_LOG || TRACE_GC) && !DACCESS_COMPILE
140
141 inline
142 BOOL is_induced (gc_reason reason)
143 {
144     return ((reason == reason_induced) ||
145             (reason == reason_induced_noforce) ||
146             (reason == reason_lowmemory) ||
147             (reason == reason_lowmemory_blocking) || 
148             (reason == reason_induced_compacting));
149 }
150
151 inline
152 BOOL is_induced_blocking (gc_reason reason)
153 {
154     return ((reason == reason_induced) ||
155             (reason == reason_lowmemory_blocking) || 
156             (reason == reason_induced_compacting));
157 }
158
159 #ifndef DACCESS_COMPILE
160 int64_t qpf;
161
162 size_t GetHighPrecisionTimeStamp()
163 {
164     int64_t ts = GCToOSInterface::QueryPerformanceCounter();
165     
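    // qpf holds the QueryPerformanceCounter frequency (ticks per second, captured
    // at initialization), so dividing the raw tick count by (qpf / 1000) converts
    // it to milliseconds.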
166     return (size_t)(ts / (qpf / 1000));    
167 }
168 #endif
169
170
171 #ifdef GC_STATS
172 // There is a current and a prior copy of the statistics.  This allows us to display deltas per reporting
173 // interval, as well as running totals.  The 'min' and 'max' values require special treatment.  They are
174 // Reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
175 // comparison with the global min/max.
176 GCStatistics g_GCStatistics;
177 GCStatistics g_LastGCStatistics;
178
179 char* GCStatistics::logFileName = NULL;
180 FILE*  GCStatistics::logFile = NULL;
181
182 void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
183 {
184 #ifdef BACKGROUND_GC
185     if (settings.concurrent)
186     {
187         bgc.Accumulate((uint32_t)timeInMSec*1000);
188         cntBGC++;
189     }
190     else if (settings.background_p)
191     {
192         fgc.Accumulate((uint32_t)timeInMSec*1000);
193         cntFGC++;
194         if (settings.compaction)
195             cntCompactFGC++;
196         assert(settings.condemned_generation < max_generation);
197         cntFGCGen[settings.condemned_generation]++;
198     }
199     else
200 #endif // BACKGROUND_GC
201     {
202         ngc.Accumulate((uint32_t)timeInMSec*1000);
203         cntNGC++;
204         if (settings.compaction)
205             cntCompactNGC++;
206         cntNGCGen[settings.condemned_generation]++;
207     }
208
209     if (is_induced (settings.reason))
210         cntReasons[(int)reason_induced]++;
211 #ifdef STRESS_HEAP
212     else if (settings.stress_induced)
213         cntReasons[(int)reason_gcstress]++;
214 #endif // STRESS_HEAP
215     else
216         cntReasons[(int)settings.reason]++;
217
218 #ifdef BACKGROUND_GC
219     if (settings.concurrent || !settings.background_p)
220     {
221 #endif // BACKGROUND_GC
222         RollOverIfNeeded();
223 #ifdef BACKGROUND_GC
224     }
225 #endif // BACKGROUND_GC
226 }
227
228 void GCStatistics::Initialize()
229 {
230     LIMITED_METHOD_CONTRACT;
231     // for efficiency's sake we're taking a dependency on the layout of a C++ object
232     // with a vtable. protect against violations of our premise:
233     static_assert(offsetof(GCStatistics, cntDisplay) == sizeof(void*),
234             "The first field of GCStatistics follows the pointer sized vtable");
235
236     int podOffs = offsetof(GCStatistics, cntDisplay);       // offset of the first POD field
237     memset((uint8_t*)(&g_GCStatistics)+podOffs, 0, sizeof(g_GCStatistics)-podOffs);
238     memset((uint8_t*)(&g_LastGCStatistics)+podOffs, 0, sizeof(g_LastGCStatistics)-podOffs);
239 }
240
241 void GCStatistics::DisplayAndUpdate()
242 {
243     LIMITED_METHOD_CONTRACT;
244
245     if (logFileName == NULL || logFile == NULL)
246         return;
247
248     {
249         if (cntDisplay == 0)
250             fprintf(logFile, "\nGCMix **** Initialize *****\n\n");
251             
252         fprintf(logFile, "GCMix **** Summary ***** %d\n", cntDisplay);
253
254         // NGC summary (total, timing info)
255         ngc.DisplayAndUpdate(logFile, "NGC ", &g_LastGCStatistics.ngc, cntNGC, g_LastGCStatistics.cntNGC, msec);
256
257         // FGC summary (total, timing info)
258         fgc.DisplayAndUpdate(logFile, "FGC ", &g_LastGCStatistics.fgc, cntFGC, g_LastGCStatistics.cntFGC, msec);
259
260         // BGC summary
261         bgc.DisplayAndUpdate(logFile, "BGC ", &g_LastGCStatistics.bgc, cntBGC, g_LastGCStatistics.cntBGC, msec);
262
263         // NGC/FGC break out by generation & compacting vs. sweeping
264         fprintf(logFile, "NGC   ");
265         for (int i = max_generation; i >= 0; --i)
266             fprintf(logFile, "gen%d %d (%d). ", i, cntNGCGen[i]-g_LastGCStatistics.cntNGCGen[i], cntNGCGen[i]);
267         fprintf(logFile, "\n");
268
269         fprintf(logFile, "FGC   ");
270         for (int i = max_generation-1; i >= 0; --i)
271             fprintf(logFile, "gen%d %d (%d). ", i, cntFGCGen[i]-g_LastGCStatistics.cntFGCGen[i], cntFGCGen[i]);
272         fprintf(logFile, "\n");
273
274         // Compacting vs. Sweeping break out
275         int _cntSweep = cntNGC-cntCompactNGC;
276         int _cntLastSweep = g_LastGCStatistics.cntNGC-g_LastGCStatistics.cntCompactNGC;
277         fprintf(logFile, "NGC   Sweeping %d (%d) Compacting %d (%d)\n",
278                _cntSweep - _cntLastSweep, _cntSweep,
279                cntCompactNGC - g_LastGCStatistics.cntCompactNGC, cntCompactNGC);
280
281         _cntSweep = cntFGC-cntCompactFGC;
282         _cntLastSweep = g_LastGCStatistics.cntFGC-g_LastGCStatistics.cntCompactFGC;
283         fprintf(logFile, "FGC   Sweeping %d (%d) Compacting %d (%d)\n",
284                _cntSweep - _cntLastSweep, _cntSweep,
285                cntCompactFGC - g_LastGCStatistics.cntCompactFGC, cntCompactFGC);
286
287 #ifdef TRACE_GC
288         // GC reasons...
289         for (int reason=(int)reason_alloc_soh; reason <= (int)reason_gcstress; ++reason)
290         {
291             if (cntReasons[reason] != 0)
292                 fprintf(logFile, "%s %d (%d). ", str_gc_reasons[reason], 
293                     cntReasons[reason]-g_LastGCStatistics.cntReasons[reason], cntReasons[reason]);
294         }
295 #endif // TRACE_GC
296         fprintf(logFile, "\n\n");
297
298         // flush the log file...
299         fflush(logFile);
300     }
301
302     g_LastGCStatistics = *this;
303
304     ngc.Reset();
305     fgc.Reset();
306     bgc.Reset();
307 }
308
309 #endif // GC_STATS
310
311 #ifdef BIT64
312 #define TOTAL_TIMES_TO_SHIFT 6
313 #else
314 #define TOTAL_TIMES_TO_SHIFT 5
315 #endif // BIT64
316
317 size_t round_up_power2 (size_t size)
318 {
319     unsigned short shift = 1;
320     size_t shifted = 0;
321
322     size--;
323     for (unsigned short i = 0; i < TOTAL_TIMES_TO_SHIFT; i++)
324     {
325         shifted = size | (size >> shift);
326         if (shifted == size)
327         {
328             break;
329         }
330
331         size = shifted;
332         shift <<= 1;
333     }
334     shifted++;
335
336     return shifted;
337 }
338
339 inline
340 size_t round_down_power2 (size_t size)
341 {
342     size_t power2 = round_up_power2 (size);
343
344     if (power2 != size)
345     {
346         power2 >>= 1;
347     }
348
349     return power2;
350 }
351
352 // the index starts from 0.
353 int index_of_set_bit (size_t power2)
354 {
355     int low = 0;
356     int high = sizeof (size_t) * 8 - 1;
357     int mid; 
358     while (low <= high)
359     {
360         mid = ((low + high)/2);
361         size_t temp = (size_t)1 << mid;
362         if (power2 & temp)
363         {
364             return mid;
365         }
366         else if (power2 < temp)
367         {
368             high = mid - 1;
369         }
370         else
371         {
372             low = mid + 1;
373         }
374     }
375
376     return -1;
377 }
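// A few illustrative values for the helpers above (added for clarity, not part
// of the original source):
//   round_up_power2 (100)   == 128      round_up_power2 (128)  == 128
//   round_down_power2 (100) == 64       round_down_power2 (64) == 64
//   index_of_set_bit (1)    == 0        index_of_set_bit (128) == 7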
378
379 inline
380 int relative_index_power2_plug (size_t power2)
381 {
382     int index = index_of_set_bit (power2);
383     assert (index <= MAX_INDEX_POWER2);
384
385     return ((index < MIN_INDEX_POWER2) ? 0 : (index - MIN_INDEX_POWER2));
386 }
387
388 inline
389 int relative_index_power2_free_space (size_t power2)
390 {
391     int index = index_of_set_bit (power2);
392     assert (index <= MAX_INDEX_POWER2);
393
394     return ((index < MIN_INDEX_POWER2) ? -1 : (index - MIN_INDEX_POWER2));
395 }
396
397 #ifdef BACKGROUND_GC
398 uint32_t bgc_alloc_spin_count = 140;
399 uint32_t bgc_alloc_spin_count_loh = 16;
400 uint32_t bgc_alloc_spin = 2;
401
402
403 inline
404 void c_write (uint32_t& place, uint32_t value)
405 {
406     Interlocked::Exchange (&place, value);
407     //place = value;
408 }
409
410 #ifndef DACCESS_COMPILE
411 // If every heap's gen2 or gen3 size is less than this threshold we will do a blocking GC.
412 const size_t bgc_min_per_heap = 4*1024*1024;
413
414 int gc_heap::gchist_index = 0;
415 gc_mechanisms_store gc_heap::gchist[max_history_count];
416
417 #ifndef MULTIPLE_HEAPS
418 size_t gc_heap::total_promoted_bytes = 0;
419 VOLATILE(bgc_state) gc_heap::current_bgc_state = bgc_not_in_process;
420 int gc_heap::gchist_index_per_heap = 0;
421 gc_heap::gc_history gc_heap::gchist_per_heap[max_history_count];
422 #endif //MULTIPLE_HEAPS
423
424 void gc_heap::add_to_history_per_heap()
425 {
426 #ifdef GC_HISTORY
427     gc_history* current_hist = &gchist_per_heap[gchist_index_per_heap];
428     current_hist->gc_index = settings.gc_index;
429     current_hist->current_bgc_state = current_bgc_state;
430     size_t elapsed = dd_gc_elapsed_time (dynamic_data_of (0));
431     current_hist->gc_time_ms = (uint32_t)elapsed;
432     current_hist->gc_efficiency = (elapsed ? (total_promoted_bytes / elapsed) : total_promoted_bytes);
433     current_hist->eph_low = generation_allocation_start (generation_of (max_generation-1));
434     current_hist->gen0_start = generation_allocation_start (generation_of (0));
435     current_hist->eph_high = heap_segment_allocated (ephemeral_heap_segment);
436 #ifdef BACKGROUND_GC
437     current_hist->bgc_lowest = background_saved_lowest_address;
438     current_hist->bgc_highest = background_saved_highest_address;
439 #endif //BACKGROUND_GC
440     current_hist->fgc_lowest = lowest_address;
441     current_hist->fgc_highest = highest_address;
442     current_hist->g_lowest = g_gc_lowest_address;
443     current_hist->g_highest = g_gc_highest_address;
444
445     gchist_index_per_heap++;
446     if (gchist_index_per_heap == max_history_count)
447     {
448         gchist_index_per_heap = 0;
449     }
450 #endif //GC_HISTORY
451 }
452
453 void gc_heap::add_to_history()
454 {
455 #ifdef GC_HISTORY
456     gc_mechanisms_store* current_settings = &gchist[gchist_index];
457     current_settings->store (&settings);
458
459     gchist_index++;
460     if (gchist_index == max_history_count)
461     {
462         gchist_index = 0;
463     }
464 #endif //GC_HISTORY
465 }
466
467 #endif //DACCESS_COMPILE
468 #endif //BACKGROUND_GC
469
470 #if defined(TRACE_GC) && !defined(DACCESS_COMPILE)
471 BOOL   gc_log_on = TRUE;
472 FILE* gc_log = NULL;
473 size_t gc_log_file_size = 0;
474
475 size_t gc_buffer_index = 0;
476 size_t max_gc_buffers = 0;
477
478 static CLRCriticalSection gc_log_lock;
479
480 // we keep this much in a buffer and only flush when the buffer is full
481 #define gc_log_buffer_size (1024*1024)
482 uint8_t* gc_log_buffer = 0;
483 size_t gc_log_buffer_offset = 0;
484
485 void log_va_msg(const char *fmt, va_list args)
486 {
487     gc_log_lock.Enter();
488
489     const int BUFFERSIZE = 512;
490     static char rgchBuffer[BUFFERSIZE];
491     char *  pBuffer  = &rgchBuffer[0];
492
493     pBuffer[0] = '\n';
494     int buffer_start = 1;
495     int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging());
496     buffer_start += pid_len;
497     memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
498     int msg_len = _vsnprintf_s(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
499     if (msg_len == -1)
500     {
501         msg_len = BUFFERSIZE - buffer_start;
502     }
503
504     msg_len += buffer_start;
505
506     if ((gc_log_buffer_offset + msg_len) > (gc_log_buffer_size - 12))
507     {
508         char index_str[8];
509         memset (index_str, '-', 8);
510         sprintf_s (index_str, _countof(index_str), "%d", (int)gc_buffer_index);
511         gc_log_buffer[gc_log_buffer_offset] = '\n';
512         memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8);
513
514         gc_buffer_index++;
515         if (gc_buffer_index > max_gc_buffers)
516         {
517             fseek (gc_log, 0, SEEK_SET);
518             gc_buffer_index = 0;
519         }
520         fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log);
521         fflush(gc_log);
522         memset (gc_log_buffer, '*', gc_log_buffer_size);
523         gc_log_buffer_offset = 0;
524     }
525
526     memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len);
527     gc_log_buffer_offset += msg_len;
528
529     gc_log_lock.Leave();
530 }
531
532 void GCLog (const char *fmt, ... )
533 {
534     if (gc_log_on && (gc_log != NULL))
535     {
536         va_list     args;
537         va_start(args, fmt);
538         log_va_msg (fmt, args);
539         va_end(args);
540     }
541 }
542 #endif // TRACE_GC && !DACCESS_COMPILE
543
544 #if defined(GC_CONFIG_DRIVEN) && !defined(DACCESS_COMPILE)
545
546 BOOL   gc_config_log_on = FALSE;
547 FILE* gc_config_log = NULL;
548
549 // we keep this much in a buffer and only flush when the buffer is full
550 #define gc_config_log_buffer_size (1*1024) // TEMP
551 uint8_t* gc_config_log_buffer = 0;
552 size_t gc_config_log_buffer_offset = 0;
553
554 // For config, since we log so little, we keep the whole history. Also it's only
555 // ever logged by one thread, so there's no need to synchronize.
556 void log_va_msg_config(const char *fmt, va_list args)
557 {
558     const int BUFFERSIZE = 256;
559     static char rgchBuffer[BUFFERSIZE];
560     char *  pBuffer  = &rgchBuffer[0];
561
562     pBuffer[0] = '\n';
563     int buffer_start = 1;
564     int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
565     assert (msg_len != -1);
566     msg_len += buffer_start;
567
568     if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
569     {
570         fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log);
571         fflush(gc_config_log);
572         gc_config_log_buffer_offset = 0;
573     }
574
575     memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len);
576     gc_config_log_buffer_offset += msg_len;
577 }
578
579 void GCLogConfig (const char *fmt, ... )
580 {
581     if (gc_config_log_on && (gc_config_log != NULL))
582     {
583         va_list     args;
584         va_start( args, fmt );
585         log_va_msg_config (fmt, args);
            va_end(args);
586     }
587 }
588 #endif // GC_CONFIG_DRIVEN && !DACCESS_COMPILE
589
590 #ifdef SYNCHRONIZATION_STATS
591
592 // Number of GCs we have done since we last logged.
593 static unsigned int         gc_count_during_log;
594  // In ms. This is how often we print out stats.
595 static const unsigned int   log_interval = 5000;
596 // Time (in ms) when we start a new log interval.
597 static unsigned int         log_start_tick;
598 static unsigned int         gc_lock_contended;
599 static int64_t              log_start_hires;
600 // Cycles accumulated in SuspendEE during log_interval.
601 static uint64_t             suspend_ee_during_log;
602 // Cycles accumulated in RestartEE during log_interval.
603 static uint64_t             restart_ee_during_log;
604 static uint64_t             gc_during_log;
605
606 #endif //SYNCHRONIZATION_STATS
607
608 void
609 init_sync_log_stats()
610 {
611 #ifdef SYNCHRONIZATION_STATS
612     if (gc_count_during_log == 0)
613     {
614         gc_heap::init_sync_stats();
615         suspend_ee_during_log = 0;
616         restart_ee_during_log = 0;
617         gc_during_log = 0;
618         gc_lock_contended = 0;
619
620         log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
621         log_start_hires = GCToOSInterface::QueryPerformanceCounter();
622     }
623     gc_count_during_log++;
624 #endif //SYNCHRONIZATION_STATS
625 }
626
627 void
628 process_sync_log_stats()
629 {
630 #ifdef SYNCHRONIZATION_STATS
631
632     unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick;
633
634     if (log_elapsed > log_interval)
635     {
636         uint64_t total = GCToOSInterface::QueryPerformanceCounter() - log_start_hires;
637         // Print out the cycles we spent on average in each suspend and restart.
638         printf("\n_________________________________________________________________________________\n"
639             "Past %d(s): #%3d GCs; Total gc_lock contended: %8u; GC: %12u\n"
640             "SuspendEE: %8u; RestartEE: %8u GC %.3f%%\n",
641             log_interval / 1000,
642             gc_count_during_log,
643             gc_lock_contended,
644             (unsigned int)(gc_during_log / gc_count_during_log),
645             (unsigned int)(suspend_ee_during_log / gc_count_during_log),
646             (unsigned int)(restart_ee_during_log / gc_count_during_log),
647             (double)(100.0f * gc_during_log / total));
648         gc_heap::print_sync_stats(gc_count_during_log);
649
650         gc_count_during_log = 0;
651     }
652 #endif //SYNCHRONIZATION_STATS
653 }
654
655 #ifdef MULTIPLE_HEAPS
656
657 enum gc_join_stage
658 {
659     gc_join_init_cpu_mapping = 0,
660     gc_join_done = 1,
661     gc_join_generation_determined = 2,
662     gc_join_begin_mark_phase = 3,
663     gc_join_scan_dependent_handles = 4,
664     gc_join_rescan_dependent_handles = 5,
665     gc_join_scan_sizedref_done = 6,
666     gc_join_null_dead_short_weak = 7,
667     gc_join_scan_finalization = 8,
668     gc_join_null_dead_long_weak = 9, 
669     gc_join_null_dead_syncblk = 10, 
670     gc_join_decide_on_compaction = 11, 
671     gc_join_rearrange_segs_compaction = 12, 
672     gc_join_adjust_handle_age_compact = 13,
673     gc_join_adjust_handle_age_sweep = 14,
674     gc_join_begin_relocate_phase = 15,
675     gc_join_relocate_phase_done = 16,
676     gc_join_verify_objects_done = 17,
677     gc_join_start_bgc = 18,
678     gc_join_restart_ee = 19,
679     gc_join_concurrent_overflow = 20,
680     gc_join_suspend_ee = 21,
681     gc_join_bgc_after_ephemeral = 22,
682     gc_join_allow_fgc = 23,
683     gc_join_bgc_sweep = 24,
684     gc_join_suspend_ee_verify = 25,
685     gc_join_restart_ee_verify = 26,
686     gc_join_set_state_free = 27,
687     gc_r_join_update_card_bundle = 28,
688     gc_join_after_absorb = 29, 
689     gc_join_verify_copy_table = 30,
690     gc_join_after_reset = 31,
691     gc_join_after_ephemeral_sweep = 32,
692     gc_join_after_profiler_heap_walk = 33,
693     gc_join_minimal_gc = 34,
694     gc_join_after_commit_soh_no_gc = 35,
695     gc_join_expand_loh_no_gc = 36,
696     gc_join_final_no_gc = 37,
697     gc_join_disable_software_write_watch = 38,
698     gc_join_max = 39
699 };
700
701 enum gc_join_flavor
702 {
703     join_flavor_server_gc = 0,
704     join_flavor_bgc = 1
705 };
706
707 #define first_thread_arrived 2
708 struct DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) join_structure
709 {
710     // Shared, non-volatile; kept on a separate cache line to prevent eviction
711     int n_threads;
712
713     // Keep polling/wait structures on a separate cache line; written once per join
714     DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
715     GCEvent joined_event[3]; // the last event in the array is only used for first_thread_arrived.
716     Volatile<int> lock_color;
717     VOLATILE(BOOL) wait_done;
718     VOLATILE(BOOL) joined_p;
719
720     // Keep volatile counted locks on a separate cache line; written many times per join
721     DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
722     VOLATILE(int32_t) join_lock;
723     VOLATILE(int32_t) r_join_lock;
724
725 };
726
727 enum join_type 
728 {
729     type_last_join = 0, 
730     type_join = 1, 
731     type_restart = 2, 
732     type_first_r_join = 3, 
733     type_r_join = 4
734 };
735
736 enum join_time 
737 {
738     time_start = 0, 
739     time_end = 1
740 };
741
742 enum join_heap_index
743 {
744     join_heap_restart = 100,
745     join_heap_r_restart = 200
746 };
747
748 struct join_event
749 {
750     uint32_t heap;
751     join_time time;
752     join_type type;
753 };
754
755 class t_join
756 {
757     join_structure join_struct;
758
759     int id;
760     gc_join_flavor flavor;
761
762 #ifdef JOIN_STATS
763     uint64_t start[MAX_SUPPORTED_CPUS], end[MAX_SUPPORTED_CPUS], start_seq;
764     // remember join id and last thread to arrive so restart can use these
765     int thd;
766     // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval
767     uint32_t start_tick;
768     // counters for joins, in 1000's of clock cycles
769     uint64_t elapsed_total[gc_join_max], wake_total[gc_join_max], seq_loss_total[gc_join_max], par_loss_total[gc_join_max], in_join_total[gc_join_max];
770 #endif //JOIN_STATS
771
772 public:
773     BOOL init (int n_th, gc_join_flavor f)
774     {
775         dprintf (JOIN_LOG, ("Initializing join structure"));
776         join_struct.n_threads = n_th;
777         join_struct.lock_color = 0;
778         for (int i = 0; i < 3; i++)
779         {
780             if (!join_struct.joined_event[i].IsValid())
781             {
782                 join_struct.joined_p = FALSE;
783                 dprintf (JOIN_LOG, ("Creating join event %d", i));
784                 // TODO - changing this to a non OS event
785                 // because this is also used by BGC threads which are 
786                 // managed threads and WaitEx does not allow you to wait
787                 // for an OS event on a managed thread.
788                 // But we are not sure if this plays well in the hosting 
789                 // environment.
790                 //join_struct.joined_event[i].CreateOSManualEventNoThrow(FALSE);
791                 if (!join_struct.joined_event[i].CreateManualEventNoThrow(FALSE))
792                     return FALSE;
793             }
794         }
795         join_struct.join_lock = join_struct.n_threads;
796         join_struct.r_join_lock = join_struct.n_threads;
797         join_struct.wait_done = FALSE;
798         flavor = f;
799
800 #ifdef JOIN_STATS
801         start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
802 #endif //JOIN_STATS
803
804         return TRUE;
805     }
806     
807     void destroy ()
808     {
809         dprintf (JOIN_LOG, ("Destroying join structure"));
810         for (int i = 0; i < 3; i++)
811         {
812             if (join_struct.joined_event[i].IsValid())
813                 join_struct.joined_event[i].CloseEvent();
814         }
815     }
816
817     inline void fire_event (int heap, join_time time, join_type type, int join_id)
818     {
819         FIRE_EVENT(GCJoin_V2, heap, time, type, join_id);
820     }
821
822     void join (gc_heap* gch, int join_id)
823     {
824 #ifdef JOIN_STATS
825         // parallel execution ends here
826         end[gch->heap_number] = get_ts();
827 #endif //JOIN_STATS
828
829         assert (!join_struct.joined_p);
830         int color = join_struct.lock_color.LoadWithoutBarrier();
831
832         if (Interlocked::Decrement(&join_struct.join_lock) != 0)
833         {
834             dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d", 
835                 flavor, join_id, (int32_t)(join_struct.join_lock)));
836
837             fire_event (gch->heap_number, time_start, type_join, join_id);
838
839             //busy wait around the color
840             if (color == join_struct.lock_color.LoadWithoutBarrier())
841             {
842 respin:
843                 int spin_count = 4096 * (gc_heap::n_heaps - 1);
844                 for (int j = 0; j < spin_count; j++)
845                 {
846                     if (color != join_struct.lock_color.LoadWithoutBarrier())
847                     {
848                         break;
849                     }
850                     YieldProcessor();           // indicate to the processor that we are spinning
851                 }
852
853                 // we've spun, and if color still hasn't changed, fall into hard wait
854                 if (color == join_struct.lock_color.LoadWithoutBarrier())
855                 {
856                     dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d", 
857                         flavor, join_id, color, (int32_t)(join_struct.join_lock)));
858
859                     //Thread* current_thread = GCToEEInterface::GetThread();
860                     //BOOL cooperative_mode = gc_heap::enable_preemptive (current_thread);
861                     uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE);
862                     //gc_heap::disable_preemptive (current_thread, cooperative_mode);
863
864                     if (dwJoinWait != WAIT_OBJECT_0)
865                     {
866                         STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
867                         FATAL_GC_ERROR ();
868                     }
869                 }
870
871                 // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
872                 if (color == join_struct.lock_color.LoadWithoutBarrier())
873                 {
874                     goto respin;
875                 }
876
877                 dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d", 
878                     flavor, join_id, (int32_t)(join_struct.join_lock)));
879             }
880
881             fire_event (gch->heap_number, time_end, type_join, join_id);
882
883 #ifdef JOIN_STATS
884             // parallel execution starts here
885             start[gch->heap_number] = get_ts();
886             Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number]));
887 #endif //JOIN_STATS
888         }
889         else
890         {
891             fire_event (gch->heap_number, time_start, type_last_join, join_id);
892
893             join_struct.joined_p = TRUE;
894             dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id));
895             join_struct.joined_event[!color].Reset();
896             id = join_id;
897             // this one is alone so it can proceed
898 #ifdef JOIN_STATS
899             // remember the join id, the last thread arriving, the start of the sequential phase,
900             // and keep track of the cycles spent waiting in the join
901             thd = gch->heap_number;
902             start_seq = get_ts();
903             Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number]));
904 #endif //JOIN_STATS
905         }
906     }
907
908     // Reverse join - the first thread that gets here does the work; other threads will
909     // only proceed after the work is done.
910     // Note that you cannot call this twice in a row on the same thread. Plus there's no
911     // need to call it twice in a row - you should just merge the work.
912     BOOL r_join (gc_heap* gch, int join_id)
913     {
914
915         if (join_struct.n_threads == 1)
916         {
917             return TRUE;
918         }
919
920         if (Interlocked::CompareExchange(&join_struct.r_join_lock, 0, join_struct.n_threads) == 0)
921         {
922             if (!join_struct.wait_done)
923             {
924                 dprintf (JOIN_LOG, ("r_join() Waiting..."));
925
926                 fire_event (gch->heap_number, time_start, type_join, join_id);
927
928                 //busy wait around the color
929                 if (!join_struct.wait_done)
930                 {
931         respin:
932                     int spin_count = 2 * 4096 * (gc_heap::n_heaps - 1);
933                     for (int j = 0; j < spin_count; j++)
934                     {
935                         if (join_struct.wait_done)
936                         {
937                             break;
938                         }
939                         YieldProcessor();           // indicate to the processor that we are spinning
940                     }
941
942                     // we've spun, and if color still hasn't changed, fall into hard wait
943                     if (!join_struct.wait_done)
944                     {
945                         dprintf (JOIN_LOG, ("Join() hard wait on reset event %d", first_thread_arrived));
946                         uint32_t dwJoinWait = join_struct.joined_event[first_thread_arrived].Wait(INFINITE, FALSE);
947                         if (dwJoinWait != WAIT_OBJECT_0)
948                         {
949                             STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
950                             FATAL_GC_ERROR ();
951                         }
952                     }
953
954                     // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
955                     if (!join_struct.wait_done)
956                     {
957                         goto respin;
958                     }
959
960                     dprintf (JOIN_LOG, ("r_join() done"));
961                 }
962
963                 fire_event (gch->heap_number, time_end, type_join, join_id);
964             }
965
966             return FALSE;
967         }
968         else
969         {
970             fire_event (gch->heap_number, time_start, type_first_r_join, join_id);
971             return TRUE;
972         }
973     }
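    // A hedged usage sketch (hypothetical caller) of the reverse join above: the
    // first thread to arrive gets TRUE back, performs the shared work exactly once,
    // and releases the others via r_restart(); every other thread blocks in r_join()
    // until then and gets FALSE back.
    //
    //   if (gc_t_join.r_join (this, gc_r_join_update_card_bundle))
    //   {
    //       // ... work done by exactly one thread ...
    //       gc_t_join.r_restart();
    //   }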
974
975 #ifdef JOIN_STATS
976     uint64_t get_ts()
977     {
978         return GCToOSInterface::QueryPerformanceCounter();
979     }
980
981     void start_ts (gc_heap* gch)
982     {
983         // parallel execution ends here
984         start[gch->heap_number] = get_ts();
985     }
986 #endif //JOIN_STATS
987
988     void restart()
989     {
990 #ifdef JOIN_STATS
991         uint64_t elapsed_seq = get_ts() - start_seq;
992         uint64_t max = 0, sum = 0, wake = 0;
993         uint64_t min_ts = start[0];
994         for (int i = 1; i < join_struct.n_threads; i++)
995         {
996             if(min_ts > start[i]) min_ts = start[i];
997         }
998
999         for (int i = 0; i < join_struct.n_threads; i++)
1000         {
1001             uint64_t wake_delay = start[i] - min_ts;
1002             uint64_t elapsed = end[i] - start[i];
1003             if (max < elapsed)
1004                 max = elapsed;
1005             sum += elapsed;
1006             wake += wake_delay;
1007         }
1008         uint64_t seq_loss = (join_struct.n_threads - 1)*elapsed_seq;
1009         uint64_t par_loss = join_struct.n_threads*max - sum;
1010         double efficiency = 0.0;
1011         if (max > 0)
1012             efficiency = sum*100.0/(join_struct.n_threads*max);
1013
1014         const double ts_scale = 1e-6;
1015
1016         // enable this printf to get statistics on each individual join as it occurs
1017 //      printf("join #%3d  seq_loss = %5g   par_loss = %5g  efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);
1018
1019         elapsed_total[id] += sum;
1020         wake_total[id] += wake;
1021         seq_loss_total[id] += seq_loss;
1022         par_loss_total[id] += par_loss;
1023
1024         // every 10 seconds, print a summary of the time spent in each type of join
1025         if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000)
1026         {
1027             printf("**** summary *****\n");
1028             for (int i = 0; i < 16; i++)
1029             {
1030                 printf("join #%3d  elapsed_total = %8g wake_loss = %8g seq_loss = %8g  par_loss = %8g  in_join_total = %8g\n",
1031                    i,
1032                    ts_scale*elapsed_total[i],
1033                    ts_scale*wake_total[i],
1034                    ts_scale*seq_loss_total[i],
1035                    ts_scale*par_loss_total[i],
1036                    ts_scale*in_join_total[i]);
1037                 elapsed_total[i] = wake_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0;
1038             }
1039             start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
1040         }
1041 #endif //JOIN_STATS
1042
1043         fire_event (join_heap_restart, time_start, type_restart, -1);
1044         assert (join_struct.joined_p);
1045         join_struct.joined_p = FALSE;
1046         join_struct.join_lock = join_struct.n_threads;
1047         dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
1048 //        printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
1049         int color = join_struct.lock_color.LoadWithoutBarrier();
1050         join_struct.lock_color = !color;
1051         join_struct.joined_event[color].Set();
1052
1053 //        printf("Set joined_event %d\n", !join_struct.lock_color);
1054
1055         fire_event (join_heap_restart, time_end, type_restart, -1);
1056
1057 #ifdef JOIN_STATS
1058         start[thd] = get_ts();
1059 #endif //JOIN_STATS
1060     }
1061     
1062     BOOL joined()
1063     {
1064         dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
1065         return join_struct.joined_p;
1066     }
1067
1068     void r_restart()
1069     {
1070         if (join_struct.n_threads != 1)
1071         {
1072             fire_event (join_heap_r_restart, time_start, type_restart, -1);
1073             join_struct.wait_done = TRUE;
1074             join_struct.joined_event[first_thread_arrived].Set();
1075             fire_event (join_heap_r_restart, time_end, type_restart, -1);
1076         }
1077     }
1078
1079     void r_init()
1080     {
1081         if (join_struct.n_threads != 1)
1082         {
1083             join_struct.r_join_lock = join_struct.n_threads;
1084             join_struct.wait_done = FALSE;
1085             join_struct.joined_event[first_thread_arrived].Reset();
1086         }
1087     }
1088 };
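// A hedged usage sketch (hypothetical caller) of the forward join: every heap
// thread calls join(); the last thread to arrive returns immediately with
// joined() == TRUE, runs the serial phase, and then calls restart() to release
// the threads still blocked in join().
//
//   gc_t_join.join (this, gc_join_decide_on_compaction);
//   if (gc_t_join.joined())
//   {
//       // ... serial work done by exactly one thread ...
//       gc_t_join.restart();
//   }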
1089
1090 t_join gc_t_join;
1091
1092 #ifdef BACKGROUND_GC
1093 t_join bgc_t_join;
1094 #endif //BACKGROUND_GC
1095
1096 #endif //MULTIPLE_HEAPS
1097
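// spin_and_switch: spin for up to count_to_spin iterations waiting for expr to
// become true; if it still hasn't, yield the rest of the timeslice to the OS.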
1098 #define spin_and_switch(count_to_spin, expr) \
1099 { \
1100     for (int j = 0; j < count_to_spin; j++) \
1101     { \
1102         if (expr) \
1103         { \
1104             break;\
1105         } \
1106         YieldProcessor(); \
1107     } \
1108     if (!(expr)) \
1109     { \
1110         GCToOSInterface::YieldThread(0); \
1111     } \
1112 }
1113
1114 #ifndef DACCESS_COMPILE
1115 #ifdef BACKGROUND_GC
1116
1117 #define max_pending_allocs 64
1118
1119 class exclusive_sync
1120 {
1121     // TODO - verify that this is the right syntax for Volatile.
1122     VOLATILE(uint8_t*) rwp_object;
1123     VOLATILE(int32_t) needs_checking;
1124     
1125     int spin_count;
1126
1127     uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)];
1128
1129     // TODO - perhaps each object should be on its own cache line...
1130     VOLATILE(uint8_t*) alloc_objects[max_pending_allocs];
1131
1132     int find_free_index ()
1133     {
1134         for (int i = 0; i < max_pending_allocs; i++)
1135         {
1136             if (alloc_objects [i] == (uint8_t*)0)
1137             {
1138                 return i;
1139             }
1140         }
1141  
1142         return -1;
1143     }
1144
1145 public:
1146     void init()
1147     {
1148         spin_count = 32 * (g_num_processors - 1);
1149         rwp_object = 0;
1150         needs_checking = 0;
1151         for (int i = 0; i < max_pending_allocs; i++)
1152         {
1153             alloc_objects [i] = (uint8_t*)0;
1154         }
1155     }
1156
1157     void check()
1158     {
1159         for (int i = 0; i < max_pending_allocs; i++)
1160         {
1161             if (alloc_objects [i] != (uint8_t*)0)
1162             {
1163                 GCToOSInterface::DebugBreak();
1164             }
1165         }
1166     }
1167
1168     void bgc_mark_set (uint8_t* obj)
1169     {
1170         dprintf (3, ("cm: probing %Ix", obj));
1171 retry:
1172         if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
1173         {
1174             // If we spend too much time scanning all the allocs,
1175             // consider adding a high water mark and scanning up only
1176             // to that; we'd need an interlocked update in the done
1177             // path when we update the high water mark.
1178             for (int i = 0; i < max_pending_allocs; i++)
1179             {
1180                 if (obj == alloc_objects[i])
1181                 {
1182                     needs_checking = 0;
1183                     dprintf (3, ("cm: will spin"));
1184                     spin_and_switch (spin_count, (obj != alloc_objects[i]));
1185                     goto retry;
1186                 }
1187             }
1188
1189             rwp_object = obj;
1190             needs_checking = 0;
1191             dprintf (3, ("cm: set %Ix", obj));
1192             return;
1193         }
1194         else
1195         {
1196             spin_and_switch (spin_count, (needs_checking == 0));
1197             goto retry;
1198         }
1199     }
1200
1201     int loh_alloc_set (uint8_t* obj)
1202     {
1203         if (!gc_heap::cm_in_progress)
1204         {
1205             return -1;
1206         }
1207
1208 retry:
1209         dprintf (3, ("loh alloc: probing %Ix", obj));
1210
1211         if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
1212         {
1213             if (obj == rwp_object)
1214             {
1215                 needs_checking = 0;
1216                 spin_and_switch (spin_count, (obj != rwp_object));
1217                 goto retry;
1218             }
1219             else
1220             {
1221                 int cookie = find_free_index();
1222
1223                 if (cookie != -1)
1224                 {
1225                     alloc_objects[cookie] = obj;
1226                     needs_checking = 0;
1227                     //if (cookie >= 4)
1228                     //{
1229                     //    GCToOSInterface::DebugBreak();
1230                     //}
1231
1232                     dprintf (3, ("loh alloc: set %Ix at %d", obj, cookie));
1233                     return cookie;
1234                 } 
1235                 else 
1236                 {
1237                     needs_checking = 0;
1238                     dprintf (3, ("loh alloc: setting %Ix will spin to acquire a free index", obj));
1239                     spin_and_switch (spin_count, (find_free_index () != -1));
1240                     goto retry;
1241                 }
1242             }
1243         }
1244         else
1245         {
1246             dprintf (3, ("loh alloc: will spin on checking %Ix", obj));
1247             spin_and_switch (spin_count, (needs_checking == 0));
1248             goto retry;
1249         }
1250     }
1251
1252     void bgc_mark_done ()
1253     {
1254         dprintf (3, ("cm: release lock on %Ix", (uint8_t *)rwp_object));
1255         rwp_object = 0;
1256     }
1257
1258     void loh_alloc_done_with_index (int index)
1259     {
1260         dprintf (3, ("loh alloc: release lock on %Ix based on %d", (uint8_t *)alloc_objects[index], index));
1261         assert ((index >= 0) && (index < max_pending_allocs)); 
1262         alloc_objects[index] = (uint8_t*)0;
1263     }
1264
1265     void loh_alloc_done (uint8_t* obj)
1266     {
1267 #ifdef BACKGROUND_GC
1268         if (!gc_heap::cm_in_progress)
1269         {
1270             return;
1271         }
1272
1273         for (int i = 0; i < max_pending_allocs; i++)
1274         {
1275             if (alloc_objects [i] == obj)
1276             {
1277                 dprintf (3, ("loh alloc: release lock on %Ix at %d", (uint8_t *)alloc_objects[i], i));
1278                 alloc_objects[i] = (uint8_t*)0;
1279                 return;
1280             }
1281         }
1282 #endif //BACKGROUND_GC
1283     }
1284 };
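// A hedged sketch (hypothetical call sites) of the exclusive_sync handshake
// above: the background marker brackets its work on a large object with
// bgc_mark_set/bgc_mark_done, while an allocating thread brackets publishing a
// new large object with loh_alloc_set/loh_alloc_done_with_index; each *_set
// call spins while the other side currently owns that object.
//
//   // background GC thread:
//   sync->bgc_mark_set (obj);
//   /* ... mark through obj ... */
//   sync->bgc_mark_done ();
//
//   // allocating thread:
//   int cookie = sync->loh_alloc_set (obj);
//   /* ... publish obj ... */
//   if (cookie != -1)
//       sync->loh_alloc_done_with_index (cookie);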
1285
1286 // Note that this class was written assuming just synchronization between
1287 // one background GC thread and multiple user threads that might request 
1288 // an FGC - it does not take into account what kind of locks the multiple
1289 // user threads might be holding at the time (e.g., there could only be one
1290 // user thread requesting an FGC because it needs to take gc_lock first)
1291 // so you'll see checks that may not be necessary if you take those conditions
1292 // into consideration.
1293 //
1294 // With the introduction of Server Background GC we no longer use this
1295 // class to do synchronization between FGCs and BGC.
1296 class recursive_gc_sync
1297 {
1298     static VOLATILE(int32_t) foreground_request_count;//initial state 0
1299     static VOLATILE(BOOL) gc_background_running; //initial state FALSE
1300     static VOLATILE(int32_t) foreground_count; // initial state 0;
1301     static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
1302     static GCEvent foreground_complete;//Auto Reset
1303     static GCEvent foreground_allowed;//Manual Reset
1304 public:
1305     static void begin_background();
1306     static void end_background();
1307     static void begin_foreground();
1308     static void end_foreground();
1309     BOOL allow_foreground ();
1310     static BOOL init();
1311     static void shutdown();
1312     static BOOL background_running_p() {return gc_background_running;}
1313 };
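// A hedged sketch of the intended protocol (hypothetical call sites): the BGC
// thread brackets its work with begin_background()/end_background() and calls
// allow_foreground() at safe points; a user thread that needs an ephemeral GC
// while a BGC is running brackets it with begin_foreground()/end_foreground().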
1314
1315 VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
1316 VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
1317 VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
1318 VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
1319 GCEvent recursive_gc_sync::foreground_complete;//Auto Reset
1320 GCEvent recursive_gc_sync::foreground_allowed;//Manual Reset
1321
1322 BOOL recursive_gc_sync::init ()
1323 {
1324     foreground_request_count = 0;
1325     foreground_count = 0;
1326     gc_background_running = FALSE;
1327     foreground_gate = 0;
1328
1329     if (!foreground_complete.CreateOSAutoEventNoThrow(FALSE))
1330     {
1331         goto error;
1332     }
1333     if (!foreground_allowed.CreateManualEventNoThrow(FALSE))
1334     {
1335         goto error;
1336     }
1337     return TRUE;
1338
1339 error:
1340     shutdown();
1341     return FALSE;
1342
1343 }
1344
1345 void recursive_gc_sync::shutdown()
1346 {
1347     if (foreground_complete.IsValid())
1348         foreground_complete.CloseEvent();
1349     if (foreground_allowed.IsValid())
1350         foreground_allowed.CloseEvent();
1351 }
1352
1353 void recursive_gc_sync::begin_background()
1354 {
1355     dprintf (2, ("begin background"));
1356     foreground_request_count = 1;
1357     foreground_count = 1;
1358     foreground_allowed.Reset();
1359     gc_background_running = TRUE;
1360 }
1361 void recursive_gc_sync::end_background()
1362 {
1363     dprintf (2, ("end background"));
1364     gc_background_running = FALSE;
1365     foreground_gate = 1;
1366     foreground_allowed.Set();
1367 }
1368
1369 void recursive_gc_sync::begin_foreground()
1370 {
1371     dprintf (2, ("begin_foreground"));
1372
1373     bool cooperative_mode = false;
1374     if (gc_background_running)
1375     {
1376         gc_heap::fire_alloc_wait_event_begin (awr_fgc_wait_for_bgc);
1377         gc_heap::alloc_wait_event_p = TRUE;
1378
1379 try_again_top:
1380
1381         Interlocked::Increment (&foreground_request_count);
1382
1383 try_again_no_inc:
1384         dprintf(2, ("Waiting sync gc point"));
1385         assert (foreground_allowed.IsValid());
1386         assert (foreground_complete.IsValid());
1387
1388         cooperative_mode = gc_heap::enable_preemptive ();
1389
1390         foreground_allowed.Wait(INFINITE, FALSE);
1391
1392         dprintf(2, ("Waiting sync gc point is done"));
1393
1394         gc_heap::disable_preemptive (cooperative_mode);
1395
1396         if (foreground_gate)
1397         {
1398             Interlocked::Increment (&foreground_count);
1399             dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
1400             if (foreground_gate)
1401             {
1402                 gc_heap::settings.concurrent = FALSE;
1403                 return;
1404             }
1405             else
1406             {
1407                 end_foreground();
1408                 goto try_again_top;
1409             }
1410         }
1411         else
1412         {
1413             goto try_again_no_inc;
1414         }
1415     }
1416 }
1417
1418 void recursive_gc_sync::end_foreground()
1419 {
1420     dprintf (2, ("end_foreground"));
1421     if (gc_background_running)
1422     {
1423         Interlocked::Decrement (&foreground_request_count);
1424         dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
1425         if (Interlocked::Decrement (&foreground_count) == 0)
1426         {
1427             //c_write ((BOOL*)&foreground_gate, 0);
1428             // TODO - couldn't make the syntax work with Volatile<T>
1429             foreground_gate = 0;
1430             if (foreground_count == 0)
1431             {
1432                 foreground_allowed.Reset ();
1433                 dprintf(2, ("setting foreground complete event"));
1434                 foreground_complete.Set();
1435             }
1436         }
1437     }
1438 }
1439
1440 inline
1441 BOOL recursive_gc_sync::allow_foreground()
1442 {
1443     assert (gc_heap::settings.concurrent);
1444     dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
1445                    (int32_t)foreground_request_count, (int32_t)foreground_count));
1446
1447     BOOL did_fgc = FALSE;
1448
1449     //if we have suspended the EE, just return because
1450     //some thread could be waiting on this to proceed.
1451     if (!GCHeap::GcInProgress)
1452     {
1453         //TODO BACKGROUND_GC This is to stress the concurrency between
1454         //background and foreground
1455 //        gc_heap::disallow_new_allocation (0);
1456
1457         //GCToOSInterface::YieldThread(0);
1458
1459         //END of TODO
1460         if (foreground_request_count != 0)
1461         {
1462             //foreground wants to run
1463             //save the important settings
1464             //TODO BACKGROUND_GC be more selective about the important settings.
1465             gc_mechanisms saved_settings = gc_heap::settings;
1466             do
1467             {
1468                 did_fgc = TRUE;
1469                 //c_write ((BOOL*)&foreground_gate, 1);
1470                 // TODO - couldn't make the syntax work with Volatile<T>
1471                 foreground_gate = 1;
1472                 foreground_allowed.Set ();
1473                 foreground_complete.Wait (INFINITE, FALSE);
1474             }while (/*foreground_request_count ||*/ foreground_gate);
1475
1476             assert (!foreground_gate);
1477
1478             //restore the important settings
1479             gc_heap::settings = saved_settings;
1480             GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
1481             //the background GC shouldn't be using gc_high and gc_low
1482             //gc_low = lowest_address;
1483             //gc_high = highest_address;
1484         }
1485
1486         //TODO BACKGROUND_GC This is to stress the concurrency between
1487         //background and foreground
1488 //        gc_heap::allow_new_allocation (0);
1489         //END of TODO
1490     }
1491
1492     dprintf (100, ("leave allow_foreground"));
1493     assert (gc_heap::settings.concurrent);
1494     return did_fgc;
1495 }
1496
1497 #endif //BACKGROUND_GC
1498 #endif //DACCESS_COMPILE
1499
1500
1501 #if  defined(COUNT_CYCLES)
1502 #ifdef _MSC_VER
1503 #pragma warning(disable:4035)
1504 #endif //_MSC_VER
1505
1506 static
1507 unsigned        GetCycleCount32()        // enough for about 40 seconds
1508 {
1509 __asm   push    EDX
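// the two bytes emitted below (0x0F 0x31) encode the rdtsc instruction: read
// the time-stamp counter into EDX:EAX (EAX, the low 32 bits, is the return value)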
1510 __asm   _emit   0x0F
1511 __asm   _emit   0x31
1512 __asm   pop     EDX
1513 };
1514
1515 #pragma warning(default:4035)
1516
1517 #endif //COUNT_CYCLES
1518
1519 #ifdef TIME_GC
1520 int mark_time, plan_time, sweep_time, reloc_time, compact_time;
1521 #endif //TIME_GC
1522
1523 #ifndef MULTIPLE_HEAPS
1524
1525 #endif // MULTIPLE_HEAPS
1526
1527 void reset_memory (uint8_t* o, size_t sizeo);
1528
1529 #ifdef WRITE_WATCH
1530
1531 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1532 static bool virtual_alloc_hardware_write_watch = false;
1533 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1534
1535 static bool hardware_write_watch_capability = false;
1536
1537 #ifndef DACCESS_COMPILE
1538
1539 //check if the write watch APIs are supported.
1540
1541 void hardware_write_watch_api_supported()
1542 {
1543     if (GCToOSInterface::SupportsWriteWatch())
1544     {
1545         hardware_write_watch_capability = true;
1546         dprintf (2, ("WriteWatch supported"));
1547     }
1548     else
1549     {
1550         dprintf (2,("WriteWatch not supported"));
1551     }
1552 }
1553
1554 #endif //!DACCESS_COMPILE
1555
1556 inline bool can_use_hardware_write_watch()
1557 {
1558     return hardware_write_watch_capability;
1559 }
1560
1561 inline bool can_use_write_watch_for_gc_heap()
1562 {
1563 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1564     return true;
1565 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1566     return can_use_hardware_write_watch();
1567 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1568 }
1569
1570 inline bool can_use_write_watch_for_card_table()
1571 {
1572 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
1573     return true;
1574 #else
1575     return can_use_hardware_write_watch();
1576 #endif
1577 }
1578
1579 #else
1580 #define mem_reserve (MEM_RESERVE)
1581 #endif //WRITE_WATCH
1582
1583 //check if the low memory notification is supported
1584
1585 #ifndef DACCESS_COMPILE
1586
1587 void WaitLongerNoInstru (int i)
1588 {
1589     // every 8th attempt:
1590     bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
1591
1592     // if we're waiting for gc to finish, we should block immediately
1593     if (g_fSuspensionPending == 0)
1594     {
1595         if  (g_num_processors > 1)
1596         {
1597             YieldProcessor();           // indicate to the processor that we are spinning
1598             if  (i & 0x01f)
1599                 GCToOSInterface::YieldThread (0);
1600             else
1601                 GCToOSInterface::Sleep (5);
1602         }
1603         else
1604             GCToOSInterface::Sleep (5);
1605     }
1606
1607     // If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
1608     // or it has no Thread object, in order to force a task to yield, or to trigger a GC.
1609     // It is important that the thread actually waits for the GC to finish; otherwise it
1610     // sits in a tight loop, and if it has high priority the perf is going to be very BAD.
1611     if (bToggleGC)
1612     {
1613 #ifdef _DEBUG
1614         // In debug builds, all enter_spin_lock operations go through this code.  If a GC has
1615         // started, it is important to block until the GC thread calls set_gc_done (since it is
1616         // guaranteed to have cleared g_TrapReturningThreads by this point).  This avoids livelock
1617         // conditions which can otherwise occur if threads are allowed to spin in this function
1618         // (and therefore starve the GC thread) between the point when the GC thread sets the
1619         // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads.
1620         if (gc_heap::gc_started)
1621         {
1622             gc_heap::wait_for_gc_done();
1623         }
1624 #endif // _DEBUG
1625         GCToEEInterface::DisablePreemptiveGC();
1626     }
1627     else if (g_fSuspensionPending > 0)
1628     {
1629         g_theGCHeap->WaitUntilGCComplete();
1630     }
1631 }
1632
1633 inline
1634 static void safe_switch_to_thread()
1635 {
1636     bool cooperative_mode = gc_heap::enable_preemptive();
1637
1638     GCToOSInterface::YieldThread(0);
1639
1640     gc_heap::disable_preemptive(cooperative_mode);
1641 }
1642
1643 //
1644 // We need the following methods to have volatile arguments, so that they can accept
1645 // raw pointers in addition to the results of the & operator on Volatile<T>.
1646 //
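// Lock word convention (as used by the functions below): -1 means the lock is
// free; enter takes it by CASing -1 -> 0, and leave releases it by storing -1.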
1647 inline
1648 static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
1649 {
1650 retry:
1651
1652     if (Interlocked::CompareExchange(lock, 0, -1) >= 0)
1653     {
1654         unsigned int i = 0;
1655         while (VolatileLoad(lock) >= 0)
1656         {
1657             if ((++i & 7) && !IsGCInProgress())
1658             {
1659                 if  (g_num_processors > 1)
1660                 {
1661 #ifndef MULTIPLE_HEAPS
1662                     int spin_count = 1024 * g_num_processors;
1663 #else //!MULTIPLE_HEAPS
1664                     int spin_count = 32 * g_num_processors;
1665 #endif //!MULTIPLE_HEAPS
1666                     for (int j = 0; j < spin_count; j++)
1667                     {
1668                         if  (VolatileLoad(lock) < 0 || IsGCInProgress())
1669                             break;
1670                         YieldProcessor();           // indicate to the processor that we are spinning
1671                     }
1672                     if  (VolatileLoad(lock) >= 0 && !IsGCInProgress())
1673                     {
1674                         safe_switch_to_thread();
1675                     }
1676                 }
1677                 else
1678                 {
1679                     safe_switch_to_thread();
1680                 }
1681             }
1682             else
1683             {
1684                 WaitLongerNoInstru(i);
1685             }
1686         }
1687         goto retry;
1688     }
1689 }
1690
1691 inline
1692 static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
1693 {
1694     return (Interlocked::CompareExchange(&*lock, 0, -1) < 0);
1695 }
1696
1697 inline
1698 static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
1699 {
1700     VolatileStore<int32_t>((int32_t*)lock, -1);
1701 }
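// A quick sketch of the lock protocol used by the *_noinstru helpers above
// (illustration only; my_lock below is a hypothetical local, not part of the GC):
// the lock word is -1 when free and 0 when held.  CompareExchange(lock, 0, -1)
// installs 0 only if the current value is -1 and returns the previous value, so a
// return value >= 0 means another thread already owns the lock and we must spin/yield.
//
//     int32_t my_lock = -1;                   // starts out free
//     enter_spin_lock_noinstru (&my_lock);    // spins, yields or waits until the CAS succeeds
//     /* ... critical section ... */
//     leave_spin_lock_noinstru (&my_lock);    // VolatileStore of -1 releases the lock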
1702
1703 #ifdef _DEBUG
1704
1705 inline
1706 static void enter_spin_lock(GCSpinLock *pSpinLock)
1707 {
1708     enter_spin_lock_noinstru(&pSpinLock->lock);
1709     assert (pSpinLock->holding_thread == (Thread*)-1);
1710     pSpinLock->holding_thread = GCToEEInterface::GetThread();
1711 }
1712
1713 inline
1714 static BOOL try_enter_spin_lock(GCSpinLock *pSpinLock)
1715 {
1716     BOOL ret = try_enter_spin_lock_noinstru(&pSpinLock->lock);
1717     if (ret)
1718         pSpinLock->holding_thread = GCToEEInterface::GetThread();
1719     return ret;
1720 }
1721
1722 inline
1723 static void leave_spin_lock(GCSpinLock *pSpinLock)
1724 {
1725     bool gc_thread_p = GCToEEInterface::WasCurrentThreadCreatedByGC();
1726 //    _ASSERTE((pSpinLock->holding_thread == GCToEEInterface::GetThread()) || gc_thread_p || pSpinLock->released_by_gc_p);
1727     pSpinLock->released_by_gc_p = gc_thread_p;
1728     pSpinLock->holding_thread = (Thread*) -1;
1729     if (pSpinLock->lock != -1)
1730         leave_spin_lock_noinstru(&pSpinLock->lock);
1731 }
1732
1733 #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) \
1734     _ASSERTE((pSpinLock)->holding_thread == GCToEEInterface::GetThread());
1735
1736 #define ASSERT_NOT_HOLDING_SPIN_LOCK(pSpinLock) \
1737     _ASSERTE((pSpinLock)->holding_thread != GCToEEInterface::GetThread());
1738
1739 #else //_DEBUG
1740
1741 //In the concurrent version, the Enable/DisablePreemptiveGC is optional because
1742 //the gc thread calls WaitLonger.
1743 void WaitLonger (int i
1744 #ifdef SYNCHRONIZATION_STATS
1745     , GCSpinLock* spin_lock
1746 #endif //SYNCHRONIZATION_STATS
1747     )
1748 {
1749 #ifdef SYNCHRONIZATION_STATS
1750     (spin_lock->num_wait_longer)++;
1751 #endif //SYNCHRONIZATION_STATS
1752
1753     // every 8th attempt:
1754     bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
1755     assert (bToggleGC);
1756
1757     // if we're waiting for gc to finish, we should block immediately
1758     if (!gc_heap::gc_started)
1759     {
1760 #ifdef SYNCHRONIZATION_STATS
1761         (spin_lock->num_switch_thread_w)++;
1762 #endif //SYNCHRONIZATION_STATS
1763         if  (g_num_processors > 1)
1764         {
1765             YieldProcessor();           // indicate to the processor that we are spinning
1766             if  (i & 0x01f)
1767                 GCToOSInterface::YieldThread (0);
1768             else
1769                 GCToOSInterface::Sleep (5);
1770         }
1771         else
1772             GCToOSInterface::Sleep (5);
1773     }
1774
1775     // If the CLR is hosted, a thread may reach here while it is in preemptive GC mode
1776     // or has no Thread object, in order to force a task to yield or to trigger a GC.
1777     // It is important that the thread actually waits for the GC; otherwise the thread
1778     // spins in a tight loop, and if it has high priority the perf is going to be very bad.
1779     if (gc_heap::gc_started)
1780     {
1781         gc_heap::wait_for_gc_done();
1782     }
1783
1784     if (bToggleGC)
1785     {
1786 #ifdef SYNCHRONIZATION_STATS
1787         (spin_lock->num_disable_preemptive_w)++;
1788 #endif //SYNCHRONIZATION_STATS
1789         GCToEEInterface::DisablePreemptiveGC();
1790     }
1791 }
1792
1793 inline
1794 static void enter_spin_lock (GCSpinLock* spin_lock)
1795 {
1796 retry:
1797
1798     if (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) >= 0)
1799     {
1800         unsigned int i = 0;
1801         while (spin_lock->lock >= 0)
1802         {
1803             if ((++i & 7) && !gc_heap::gc_started)
1804             {
1805                 if  (g_num_processors > 1)
1806                 {
1807 #ifndef MULTIPLE_HEAPS
1808                     int spin_count = 1024 * g_num_processors;
1809 #else //!MULTIPLE_HEAPS
1810                     int spin_count = 32 * g_num_processors;
1811 #endif //!MULTIPLE_HEAPS
1812                     for (int j = 0; j < spin_count; j++)
1813                     {
1814                         if  (spin_lock->lock < 0 || gc_heap::gc_started)
1815                             break;
1816                         YieldProcessor();           // indicate to the processor that we are spinning
1817                     }
1818                     if  (spin_lock->lock >= 0 && !gc_heap::gc_started)
1819                     {
1820 #ifdef SYNCHRONIZATION_STATS
1821                         (spin_lock->num_switch_thread)++;
1822 #endif //SYNCHRONIZATION_STATS
1823                         bool cooperative_mode = gc_heap::enable_preemptive ();
1824
1825                         GCToOSInterface::YieldThread(0);
1826
1827                         gc_heap::disable_preemptive (cooperative_mode);
1828                     }
1829                 }
1830                 else
1831                     GCToOSInterface::YieldThread(0);
1832             }
1833             else
1834             {
1835                 WaitLonger(i
1836 #ifdef SYNCHRONIZATION_STATS
1837                         , spin_lock
1838 #endif //SYNCHRONIZATION_STATS
1839                     );
1840             }
1841         }
1842         goto retry;
1843     }
1844 }
1845
1846 inline BOOL try_enter_spin_lock(GCSpinLock* spin_lock)
1847 {
1848     return (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) < 0);
1849 }
1850
1851 inline
1852 static void leave_spin_lock (GCSpinLock * spin_lock)
1853 {
1854     spin_lock->lock = -1;
1855 }
1856
1857 #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock)
1858
1859 #endif //_DEBUG
1860
1861 bool gc_heap::enable_preemptive ()
1862 {
1863     return GCToEEInterface::EnablePreemptiveGC();
1864 }
1865
1866 void gc_heap::disable_preemptive (bool restore_cooperative)
1867 {
1868     if (restore_cooperative)
1869     {
1870         GCToEEInterface::DisablePreemptiveGC();
1871     }
1872 }
1873
1874 #endif // !DACCESS_COMPILE
1875
1876 typedef void **  PTR_PTR;
1877 //This function clears a piece of memory
1878 // size has to be pointer-size aligned
1879
1880 inline
1881 void memclr ( uint8_t* mem, size_t size)
1882 {
1883     dprintf (3, ("MEMCLR: %Ix, %d", mem, size));
1884     assert ((size & (sizeof(PTR_PTR)-1)) == 0);
1885     assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
1886
1887 #if 0
1888     // The compiler will recognize this pattern and replace it with a memset call. We may as well just call
1889     // memset directly to make it obvious what's going on.
1890     PTR_PTR m = (PTR_PTR) mem;
1891     for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
1892         *(m++) = 0;
1893 #endif
1894
1895     memset (mem, 0, size);
1896 }
1897
1898 void memcopy (uint8_t* dmem, uint8_t* smem, size_t size)
1899 {
1900     const size_t sz4ptr = sizeof(PTR_PTR)*4;
1901     const size_t sz2ptr = sizeof(PTR_PTR)*2;
1902     const size_t sz1ptr = sizeof(PTR_PTR)*1;
1903
1904     // size must be a multiple of the pointer size
1905     assert ((size & (sizeof (PTR_PTR)-1)) == 0);
1906     assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
1907
1908     // copy in groups of four pointer sized things at a time
1909     if (size >= sz4ptr)
1910     {
1911         do
1912         {
1913             ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1914             ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
1915             ((PTR_PTR)dmem)[2] = ((PTR_PTR)smem)[2];
1916             ((PTR_PTR)dmem)[3] = ((PTR_PTR)smem)[3];
1917             dmem += sz4ptr;
1918             smem += sz4ptr;
1919         }
1920         while ((size -= sz4ptr) >= sz4ptr);
1921     }
1922
1923     // still two pointer sized things or more left to copy?
1924     if (size & sz2ptr)
1925     {
1926         ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1927         ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
1928         dmem += sz2ptr;
1929         smem += sz2ptr;
1930     }
1931
1932     // still one pointer sized thing left to copy?
1933     if (size & sz1ptr)
1934     {
1935         ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1936         // dmem += sz1ptr;
1937         // smem += sz1ptr;
1938     }
1939
1940 }
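// Worked example of the copy decomposition above (sizes in pointer-sized units):
// for size == 7 * sizeof(PTR_PTR), the main loop copies one 4-pointer chunk and
// leaves 3 pointers; (size & sz2ptr) is then non-zero so 2 more are copied, and
// (size & sz1ptr) is non-zero so the final pointer is copied.  This works because
// the remainder after the 4-pointer loop is always 0-3 pointers, i.e. exactly the
// two low bits tested by the masks.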
1941
1942 inline
1943 ptrdiff_t round_down (ptrdiff_t add, int pitch)
1944 {
1945     return ((add / pitch) * pitch);
1946 }
1947
1948 #if defined(FEATURE_STRUCTALIGN) && defined(RESPECT_LARGE_ALIGNMENT)
1949 // FEATURE_STRUCTALIGN allows the compiler to dictate the alignment,
1950 // i.e, if a larger alignment matters or is beneficial, the compiler
1951 // generated info tells us so.  RESPECT_LARGE_ALIGNMENT is just the
1952 // converse - it's a heuristic for the GC to use a larger alignment.
1953 #error FEATURE_STRUCTALIGN should imply !RESPECT_LARGE_ALIGNMENT
1954 #endif
1955
1956 #if defined(FEATURE_STRUCTALIGN) && defined(FEATURE_LOH_COMPACTION)
1957 #error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive
1958 #endif
1959
1960 #if defined(GROWABLE_SEG_MAPPING_TABLE) && !defined(SEG_MAPPING_TABLE)
1961 #error if GROWABLE_SEG_MAPPING_TABLE is defined, SEG_MAPPING_TABLE must be defined
1962 #endif
1963
1964 inline
1965 BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
1966 {
1967 #ifdef RESPECT_LARGE_ALIGNMENT
1968     return ((((size_t)p1 ^ (size_t)p2) & 7) == 0);
1969 #else
1970     UNREFERENCED_PARAMETER(p1);
1971     UNREFERENCED_PARAMETER(p2);
1972     return TRUE;
1973 #endif //RESPECT_LARGE_ALIGNMENT
1974 }
1975
1976 inline 
1977 size_t switch_alignment_size (BOOL already_padded_p)
1978 {
1979     if (already_padded_p)
1980         return DATA_ALIGNMENT;
1981     else
1982         return (Align (min_obj_size) +((Align (min_obj_size)&DATA_ALIGNMENT)^DATA_ALIGNMENT));
1983 }
1984
1985
1986 #ifdef FEATURE_STRUCTALIGN
1987 void set_node_aligninfo (uint8_t *node, int requiredAlignment, ptrdiff_t pad);
1988 void clear_node_aligninfo (uint8_t *node);
1989 #else // FEATURE_STRUCTALIGN
1990 #define node_realigned(node)    (((plug_and_reloc*)(node))[-1].reloc & 1)
1991 void set_node_realigned (uint8_t* node);
1992 void clear_node_realigned(uint8_t* node);
1993 #endif // FEATURE_STRUCTALIGN
1994
1995 inline
1996 size_t AlignQword (size_t nbytes)
1997 {
1998 #ifdef FEATURE_STRUCTALIGN
1999     // This function is used to align everything on the large object
2000     // heap to an 8-byte boundary, to reduce the number of unaligned
2001     // accesses to (say) arrays of doubles.  With FEATURE_STRUCTALIGN,
2002     // the compiler dictates the optimal alignment instead of having
2003     // a heuristic in the GC.
2004     return Align (nbytes);
2005 #else // FEATURE_STRUCTALIGN
2006     return (nbytes + 7) & ~7;
2007 #endif // FEATURE_STRUCTALIGN
2008 }
2009
2010 inline
2011 BOOL Aligned (size_t n)
2012 {
2013     return (n & ALIGNCONST) == 0;
2014 }
2015
2016 #define OBJECT_ALIGNMENT_OFFSET (sizeof(MethodTable *))
2017
2018 #ifdef FEATURE_STRUCTALIGN
2019 #define MAX_STRUCTALIGN OS_PAGE_SIZE
2020 #else // FEATURE_STRUCTALIGN
2021 #define MAX_STRUCTALIGN 0
2022 #endif // FEATURE_STRUCTALIGN
2023
2024 #ifdef FEATURE_STRUCTALIGN
2025 inline
2026 ptrdiff_t AdjustmentForMinPadSize(ptrdiff_t pad, int requiredAlignment)
2027 {
2028     // The resulting alignpad must be either 0 or at least min_obj_size.
2029     // Note that by computing the following difference on unsigned types,
2030     // we can do the range check 0 < alignpad < min_obj_size with a
2031     // single conditional branch.
2032     if ((size_t)(pad - DATA_ALIGNMENT) < Align (min_obj_size) - DATA_ALIGNMENT)
2033     {
2034         return requiredAlignment;
2035     }
2036     return 0;
2037 }
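// Worked example of the unsigned range check above (the values are assumptions used
// purely for illustration): with DATA_ALIGNMENT == 8 and Align (min_obj_size) == 24,
// the test becomes (size_t)(pad - 8) < 16.
//   pad == 0  -> 0 - 8 wraps to a huge unsigned value, test fails -> no adjustment
//   pad == 8  -> 0  < 16, test passes -> pad too small to hold a free object, adjust
//   pad == 16 -> 8  < 16, test passes -> adjust
//   pad == 24 -> 16 < 16, test fails  -> pad already big enough, no adjustment
// Since a non-zero alignpad is always a multiple of DATA_ALIGNMENT, this is the same
// as checking 0 < alignpad < Align (min_obj_size) with a single branch.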
2038
2039 inline
2040 uint8_t* StructAlign (uint8_t* origPtr, int requiredAlignment, ptrdiff_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
2041 {
2042     // required alignment must be a power of two
2043     _ASSERTE(((size_t)origPtr & ALIGNCONST) == 0);
2044     _ASSERTE(((requiredAlignment - 1) & requiredAlignment) == 0);
2045     _ASSERTE(requiredAlignment >= sizeof(void *));
2046     _ASSERTE(requiredAlignment <= MAX_STRUCTALIGN);
2047
2048     // When this method is invoked for individual objects (i.e., alignmentOffset
2049     // is just the size of the PostHeader), what needs to be aligned when
2050     // we're done is the pointer to the payload of the object (which means
2051     // the actual resulting object pointer is typically not aligned).
2052
2053     uint8_t* result = (uint8_t*)Align ((size_t)origPtr + alignmentOffset, requiredAlignment-1) - alignmentOffset;
2054     ptrdiff_t alignpad = result - origPtr;
2055
2056     return result + AdjustmentForMinPadSize (alignpad, requiredAlignment);
2057 }
2058
2059 inline
2060 ptrdiff_t ComputeStructAlignPad (uint8_t* plug, int requiredAlignment, size_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
2061 {
2062     return StructAlign (plug, requiredAlignment, alignmentOffset) - plug;
2063 }
2064
2065 BOOL IsStructAligned (uint8_t *ptr, int requiredAlignment)
2066 {
2067     return StructAlign (ptr, requiredAlignment) == ptr;
2068 }
2069
2070 inline
2071 ptrdiff_t ComputeMaxStructAlignPad (int requiredAlignment)
2072 {
2073     if (requiredAlignment == DATA_ALIGNMENT)
2074         return 0;
2075     // Since a non-zero alignment padding cannot be less than min_obj_size (so we can fit the
2076     // alignment padding object), the worst-case alignment padding is correspondingly larger
2077     // than the required alignment.
2078     return requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT;
2079 }
2080
2081 inline
2082 ptrdiff_t ComputeMaxStructAlignPadLarge (int requiredAlignment)
2083 {
2084     if (requiredAlignment <= get_alignment_constant (TRUE)+1)
2085         return 0;
2086     // This is the same as ComputeMaxStructAlignPad, except that in addition to leaving space
2087     // for padding before the actual object, it also leaves space for filling a gap after the
2088     // actual object.  This is needed on the large object heap, as the outer allocation functions
2089     // don't operate on an allocation context (which would have left space for the final gap).
2090     return requiredAlignment + Align (min_obj_size) * 2 - DATA_ALIGNMENT;
2091 }
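// Rough arithmetic example (DATA_ALIGNMENT == 8 and Align (min_obj_size) == 24 are
// assumed here purely for illustration): for requiredAlignment == 64,
//   ComputeMaxStructAlignPad      -> 64 + 24 - 8     == 80 bytes worst case, and
//   ComputeMaxStructAlignPadLarge -> 64 + 24 * 2 - 8 == 104 bytes worst case,
// the extra Align (min_obj_size) covering the gap filled after the object on the LOH.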
2092
2093 uint8_t* gc_heap::pad_for_alignment (uint8_t* newAlloc, int requiredAlignment, size_t size, alloc_context* acontext)
2094 {
2095     uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
2096     if (alignedPtr != newAlloc) {
2097         make_unused_array (newAlloc, alignedPtr - newAlloc);
2098     }
2099     acontext->alloc_ptr = alignedPtr + Align (size);
2100     return alignedPtr;
2101 }
2102
2103 uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size)
2104 {
2105     uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
2106     if (alignedPtr != newAlloc) {
2107         make_unused_array (newAlloc, alignedPtr - newAlloc);
2108     }
2109     if (alignedPtr < newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment)) {
2110         make_unused_array (alignedPtr + AlignQword (size), newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment) - alignedPtr);
2111     }
2112     return alignedPtr;
2113 }
2114 #else // FEATURE_STRUCTALIGN
2115 #define ComputeMaxStructAlignPad(requiredAlignment) 0
2116 #define ComputeMaxStructAlignPadLarge(requiredAlignment) 0
2117 #endif // FEATURE_STRUCTALIGN
2118
2119 //CLR_SIZE is the maximum number of bytes from gen0 that is cleared to 0 in one chunk
2120 #ifdef SERVER_GC
2121 #define CLR_SIZE ((size_t)(8*1024))
2122 #else //SERVER_GC
2123 #define CLR_SIZE ((size_t)(8*1024))
2124 #endif //SERVER_GC
2125
2126 #define END_SPACE_AFTER_GC (LARGE_OBJECT_SIZE + MAX_STRUCTALIGN)
2127
2128 #ifdef BACKGROUND_GC
2129 #define SEGMENT_INITIAL_COMMIT (2*OS_PAGE_SIZE)
2130 #else
2131 #define SEGMENT_INITIAL_COMMIT (OS_PAGE_SIZE)
2132 #endif //BACKGROUND_GC
2133
2134 #ifdef SERVER_GC
2135
2136 #ifdef BIT64
2137
2138 #define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024))
2139 #define LHEAP_ALLOC   ((size_t)(1024*1024*256))
2140
2141 #else
2142
2143 #define INITIAL_ALLOC ((size_t)(1024*1024*64))
2144 #define LHEAP_ALLOC   ((size_t)(1024*1024*32))
2145
2146 #endif  // BIT64
2147
2148 #else //SERVER_GC
2149
2150 #ifdef BIT64
2151
2152 #define INITIAL_ALLOC ((size_t)(1024*1024*256))
2153 #define LHEAP_ALLOC   ((size_t)(1024*1024*128))
2154
2155 #else
2156
2157 #define INITIAL_ALLOC ((size_t)(1024*1024*16))
2158 #define LHEAP_ALLOC   ((size_t)(1024*1024*16))
2159
2160 #endif  // BIT64
2161
2162 #endif //SERVER_GC
2163
2164 //amount in bytes of the etw allocation tick
2165 const size_t etw_allocation_tick = 100*1024;
2166
2167 const size_t low_latency_alloc = 256*1024;
2168
2169 const size_t fgn_check_quantum = 2*1024*1024;
2170
2171 #ifdef MH_SC_MARK
2172 const int max_snoop_level = 128;
2173 #endif //MH_SC_MARK
2174
2175
2176 #ifdef CARD_BUNDLE
2177 //threshold of heap size to turn on card bundles.
2178 #define SH_TH_CARD_BUNDLE  (40*1024*1024)
2179 #define MH_TH_CARD_BUNDLE  (180*1024*1024)
2180 #endif //CARD_BUNDLE
2181
2182 #define GC_EPHEMERAL_DECOMMIT_TIMEOUT 5000
2183
2184 inline
2185 size_t align_on_page (size_t add)
2186 {
2187     return ((add + OS_PAGE_SIZE - 1) & ~((size_t)OS_PAGE_SIZE - 1));
2188 }
2189
2190 inline
2191 uint8_t* align_on_page (uint8_t* add)
2192 {
2193     return (uint8_t*)align_on_page ((size_t) add);
2194 }
2195
2196 inline
2197 size_t align_lower_page (size_t add)
2198 {
2199     return (add & ~((size_t)OS_PAGE_SIZE - 1));
2200 }
2201
2202 inline
2203 uint8_t* align_lower_page (uint8_t* add)
2204 {
2205     return (uint8_t*)align_lower_page ((size_t)add);
2206 }
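// Example (assuming OS_PAGE_SIZE == 0x1000, i.e. 4KB pages, which may differ per platform):
//   align_on_page    (0x20ABC) == 0x21000   (rounds up to the next page boundary)
//   align_lower_page (0x20ABC) == 0x20000   (rounds down to the containing page)
// A value that is already page aligned is returned unchanged by both.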
2207
2208 inline
2209 size_t align_write_watch_lower_page (size_t add)
2210 {
2211     return (add & ~(WRITE_WATCH_UNIT_SIZE - 1));
2212 }
2213
2214 inline
2215 uint8_t* align_write_watch_lower_page (uint8_t* add)
2216 {
2217     return (uint8_t*)align_write_watch_lower_page ((size_t)add);
2218 }
2219
2220
2221 inline
2222 BOOL power_of_two_p (size_t integer)
2223 {
2224     return !(integer & (integer-1));
2225 }
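// Note: (integer & (integer-1)) clears the lowest set bit, so the expression above is
// non-zero exactly when more than one bit is set.  For example power_of_two_p (0x1000)
// is TRUE and power_of_two_p (0x1800) is FALSE; note that 0 also passes this test.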
2226
2227 inline
2228 BOOL oddp (size_t integer)
2229 {
2230     return (integer & 1) != 0;
2231 }
2232
2233 // we only ever use this for WORDs.
2234 size_t logcount (size_t word)
2235 {
2236     //counts the number of set bits in a 16 bit word.
2237     assert (word < 0x10000);
2238     size_t count;
2239     count = (word & 0x5555) + ( (word >> 1 ) & 0x5555);
2240     count = (count & 0x3333) + ( (count >> 2) & 0x3333);
2241     count = (count & 0x0F0F) + ( (count >> 4) & 0x0F0F);
2242     count = (count & 0x00FF) + ( (count >> 8) & 0x00FF);
2243     return count;
2244 }
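// Worked example of the pairwise (SWAR) reduction above for word == 0xB3 (0b10110011):
//   pairs:   (0xB3 & 0x5555) + ((0xB3 >> 1) & 0x5555) == 0x11 + 0x51 == 0x62  (1,2,0,2)
//   nibbles: (0x62 & 0x3333) + ((0x62 >> 2) & 0x3333) == 0x22 + 0x10 == 0x32  (3,2)
//   bytes:   (0x32 & 0x0F0F) + ((0x32 >> 4) & 0x0F0F) == 0x02 + 0x03 == 0x05
//   final:   (0x05 & 0x00FF) + ((0x05 >> 8) & 0x00FF) == 5
// i.e. logcount (0xB3) == 5, the number of set bits.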
2245
2246 #ifndef DACCESS_COMPILE
2247
2248 void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
2249 {
2250     WriteBarrierParameters args = {};
2251     args.operation = WriteBarrierOp::StompResize;
2252     args.is_runtime_suspended = is_runtime_suspended;
2253     args.requires_upper_bounds_check = requires_upper_bounds_check;
2254
2255     args.card_table = g_gc_card_table;
2256 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
2257     args.card_bundle_table = g_gc_card_bundle_table;
2258 #endif
2259
2260     args.lowest_address = g_gc_lowest_address;
2261     args.highest_address = g_gc_highest_address;
2262
2263 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
2264     if (SoftwareWriteWatch::IsEnabledForGCHeap())
2265     {
2266         args.write_watch_table = g_gc_sw_ww_table;
2267     }
2268 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
2269
2270     GCToEEInterface::StompWriteBarrier(&args);
2271 }
2272
2273 void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
2274 {
2275     WriteBarrierParameters args = {};
2276     args.operation = WriteBarrierOp::StompEphemeral;
2277     args.is_runtime_suspended = true;
2278     args.ephemeral_low = ephemeral_low;
2279     args.ephemeral_high = ephemeral_high;
2280     GCToEEInterface::StompWriteBarrier(&args);
2281 }
2282
2283 void stomp_write_barrier_initialize(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
2284 {
2285     WriteBarrierParameters args = {};
2286     args.operation = WriteBarrierOp::Initialize;
2287     args.is_runtime_suspended = true;
2288     args.requires_upper_bounds_check = false;
2289     args.card_table = g_gc_card_table;
2290
2291 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
2292     args.card_bundle_table = g_gc_card_bundle_table;
2293 #endif
2294     
2295     args.lowest_address = g_gc_lowest_address;
2296     args.highest_address = g_gc_highest_address;
2297     args.ephemeral_low = ephemeral_low;
2298     args.ephemeral_high = ephemeral_high;
2299     GCToEEInterface::StompWriteBarrier(&args);
2300 }
2301
2302 #endif // DACCESS_COMPILE
2303
2304 //extract the low bits [0, bits) of a uint32_t
2305 #define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1))
2306 //extract the high bits [bits, 32) of a uint32_t
2307 #define highbits(wrd, bits) ((wrd) & ~((1 << (bits))-1))
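// For example, with wrd == 0xABCD and bits == 8:
//   lowbits  (0xABCD, 8) == 0x00CD   (keeps bits [0, 8))
//   highbits (0xABCD, 8) == 0xAB00   (keeps bits [8, 32))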
2308
2309 // Things we need to manually initialize:
2310 // gen0 min_size - based on cache
2311 // gen0/1 max_size - based on segment size
2312 static static_data static_data_table[latency_level_last - latency_level_first + 1][NUMBERGENERATIONS] = 
2313 {
2314     // latency_level_memory_footprint
2315     {
2316         // gen0
2317         {0, 0, 40000, 0.5f, 9.0f, 20.0f, 1000, 1},
2318         // gen1
2319         {163840, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
2320         // gen2
2321         {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
2322         // gen3
2323         {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
2324     },
2325
2326     // latency_level_balanced
2327     {
2328         // gen0
2329         {0, 0, 40000, 0.5f,
2330 #ifdef MULTIPLE_HEAPS
2331             20.0f, 40.0f,
2332 #else
2333             9.0f, 20.0f,
2334 #endif //MULTIPLE_HEAPS
2335             1000, 1},
2336         // gen1
2337         {9*32*1024, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
2338         // gen2
2339         {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
2340         // gen3
2341         {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
2342     },
2343 };
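// For reference, each row above is an aggregate initializer for a static_data entry;
// based on the field order in gcpriv.h the columns are believed to correspond, in order, to
// min_size, max_size, fragmentation_limit, fragmentation_burden_limit, limit, max_limit,
// time_clock and gc_clock (treat this mapping as an assumption when reading the table).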
2344
2345 class mark;
2346 class generation;
2347 class heap_segment;
2348 class CObjectHeader;
2349 class dynamic_data;
2350 class l_heap;
2351 class sorted_table;
2352 class c_synchronize;
2353
2354 #ifdef FEATURE_PREMORTEM_FINALIZATION
2355 #ifndef DACCESS_COMPILE
2356 static
2357 HRESULT AllocateCFinalize(CFinalize **pCFinalize);
2358 #endif //!DACCESS_COMPILE
2359 #endif // FEATURE_PREMORTEM_FINALIZATION
2360
2361 uint8_t* tree_search (uint8_t* tree, uint8_t* old_address);
2362
2363
2364 #ifdef USE_INTROSORT
2365 #define _sort introsort::sort
2366 #else //USE_INTROSORT
2367 #define _sort qsort1
2368 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
2369 #endif //USE_INTROSORT
2370
2371 void* virtual_alloc (size_t size);
2372 void virtual_free (void* add, size_t size);
2373
2374 /* per heap static initialization */
2375 #ifdef MARK_ARRAY
2376 #ifndef MULTIPLE_HEAPS
2377 uint32_t*   gc_heap::mark_array;
2378 #endif //MULTIPLE_HEAPS
2379 #endif //MARK_ARRAY
2380
2381 #ifdef MARK_LIST
2382 uint8_t**   gc_heap::g_mark_list;
2383
2384 #ifdef PARALLEL_MARK_LIST_SORT
2385 uint8_t**   gc_heap::g_mark_list_copy;
2386 #endif //PARALLEL_MARK_LIST_SORT
2387
2388 size_t      gc_heap::mark_list_size;
2389 #endif //MARK_LIST
2390
2391 #ifdef SEG_MAPPING_TABLE
2392 seg_mapping* seg_mapping_table;
2393 #endif //SEG_MAPPING_TABLE
2394
2395 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2396 sorted_table* gc_heap::seg_table;
2397 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2398
2399 #ifdef MULTIPLE_HEAPS
2400 GCEvent     gc_heap::ee_suspend_event;
2401 size_t      gc_heap::min_balance_threshold = 0;
2402 #endif //MULTIPLE_HEAPS
2403
2404 VOLATILE(BOOL) gc_heap::gc_started;
2405
2406 #ifdef MULTIPLE_HEAPS
2407
2408 GCEvent     gc_heap::gc_start_event;
2409
2410 bool        gc_heap::gc_thread_no_affinitize_p = false;
2411
2412 int         gc_heap::n_heaps;
2413
2414 gc_heap**   gc_heap::g_heaps;
2415
2416 size_t*     gc_heap::g_promoted;
2417
2418 #ifdef MH_SC_MARK
2419 int*        gc_heap::g_mark_stack_busy;
2420 #endif //MH_SC_MARK
2421
2422
2423 #ifdef BACKGROUND_GC
2424 size_t*     gc_heap::g_bpromoted;
2425 #endif //BACKGROUND_GC
2426
2427 #else  //MULTIPLE_HEAPS
2428
2429 size_t      gc_heap::g_promoted;
2430
2431 #ifdef BACKGROUND_GC
2432 size_t      gc_heap::g_bpromoted;
2433 #endif //BACKGROUND_GC
2434
2435 #endif //MULTIPLE_HEAPS
2436
2437 size_t      gc_heap::reserved_memory = 0;
2438 size_t      gc_heap::reserved_memory_limit = 0;
2439 BOOL        gc_heap::g_low_memory_status;
2440
2441 #ifndef DACCESS_COMPILE
2442 static gc_reason gc_trigger_reason = reason_empty;
2443 #endif //DACCESS_COMPILE
2444
2445 gc_latency_level gc_heap::latency_level = latency_level_default;
2446
2447 gc_mechanisms  gc_heap::settings;
2448
2449 gc_history_global gc_heap::gc_data_global;
2450
2451 size_t      gc_heap::gc_last_ephemeral_decommit_time = 0;
2452
2453 size_t      gc_heap::gc_gen0_desired_high;
2454
2455 #ifdef SHORT_PLUGS
2456 double       gc_heap::short_plugs_pad_ratio = 0;
2457 #endif //SHORT_PLUGS
2458
2459 #if defined(BIT64)
2460 #define MAX_ALLOWED_MEM_LOAD 85
2461
2462 // consider putting this in dynamic data -
2463 // we may want different values for workstation
2464 // and server GC.
2465 #define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024)
2466
2467 size_t      gc_heap::youngest_gen_desired_th;
2468 #endif //BIT64
2469
2470 uint32_t    gc_heap::last_gc_memory_load = 0;
2471
2472 size_t      gc_heap::last_gc_heap_size = 0;
2473
2474 size_t      gc_heap::last_gc_fragmentation = 0;
2475
2476 uint64_t    gc_heap::mem_one_percent = 0;
2477
2478 uint32_t    gc_heap::high_memory_load_th = 0;
2479
2480 uint64_t    gc_heap::total_physical_mem = 0;
2481
2482 uint64_t    gc_heap::entry_available_physical_mem = 0;
2483
2484 #ifdef BACKGROUND_GC
2485 GCEvent     gc_heap::bgc_start_event;
2486
2487 gc_mechanisms gc_heap::saved_bgc_settings;
2488
2489 GCEvent     gc_heap::background_gc_done_event;
2490
2491 GCEvent     gc_heap::ee_proceed_event;
2492
2493 bool        gc_heap::gc_can_use_concurrent = false;
2494
2495 bool        gc_heap::temp_disable_concurrent_p = false;
2496
2497 uint32_t    gc_heap::cm_in_progress = FALSE;
2498
2499 BOOL        gc_heap::dont_restart_ee_p = FALSE;
2500
2501 BOOL        gc_heap::keep_bgc_threads_p = FALSE;
2502
2503 GCEvent     gc_heap::bgc_threads_sync_event;
2504
2505 BOOL        gc_heap::do_ephemeral_gc_p = FALSE;
2506
2507 BOOL        gc_heap::do_concurrent_p = FALSE;
2508
2509 size_t      gc_heap::ephemeral_fgc_counts[max_generation];
2510
2511 BOOL        gc_heap::alloc_wait_event_p = FALSE;
2512
2513 VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
2514
2515 #endif //BACKGROUND_GC
2516
2517 #ifndef MULTIPLE_HEAPS
2518 #ifdef SPINLOCK_HISTORY
2519 int         gc_heap::spinlock_info_index = 0;
2520 spinlock_info gc_heap::last_spinlock_info[max_saved_spinlock_info + 8];
2521 #endif //SPINLOCK_HISTORY
2522
2523 size_t      gc_heap::fgn_last_alloc = 0;
2524
2525 int         gc_heap::generation_skip_ratio = 100;
2526
2527 uint64_t    gc_heap::loh_alloc_since_cg = 0;
2528
2529 BOOL        gc_heap::elevation_requested = FALSE;
2530
2531 BOOL        gc_heap::last_gc_before_oom = FALSE;
2532
2533 #ifdef BACKGROUND_GC
2534 uint8_t*    gc_heap::background_saved_lowest_address = 0;
2535 uint8_t*    gc_heap::background_saved_highest_address = 0;
2536 uint8_t*    gc_heap::next_sweep_obj = 0;
2537 uint8_t*    gc_heap::current_sweep_pos = 0;
2538 exclusive_sync* gc_heap::bgc_alloc_lock;
2539 #endif //BACKGROUND_GC
2540
2541 oom_history gc_heap::oom_info;
2542
2543 fgm_history gc_heap::fgm_result;
2544
2545 BOOL        gc_heap::ro_segments_in_range;
2546
2547 size_t      gc_heap::gen0_big_free_spaces = 0;
2548
2549 uint8_t*    gc_heap::ephemeral_low;
2550
2551 uint8_t*    gc_heap::ephemeral_high;
2552
2553 uint8_t*    gc_heap::lowest_address;
2554
2555 uint8_t*    gc_heap::highest_address;
2556
2557 BOOL        gc_heap::ephemeral_promotion;
2558
2559 uint8_t*    gc_heap::saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2560 size_t      gc_heap::saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2561
2562 short*      gc_heap::brick_table;
2563
2564 uint32_t*   gc_heap::card_table;
2565
2566 #ifdef CARD_BUNDLE
2567 uint32_t*   gc_heap::card_bundle_table;
2568 #endif //CARD_BUNDLE
2569
2570 uint8_t*    gc_heap::gc_low;
2571
2572 uint8_t*    gc_heap::gc_high;
2573
2574 uint8_t*    gc_heap::demotion_low;
2575
2576 uint8_t*    gc_heap::demotion_high;
2577
2578 BOOL        gc_heap::demote_gen1_p = TRUE;
2579
2580 uint8_t*    gc_heap::last_gen1_pin_end;
2581
2582 gen_to_condemn_tuning gc_heap::gen_to_condemn_reasons;
2583
2584 size_t      gc_heap::etw_allocation_running_amount[2];
2585
2586 int         gc_heap::gc_policy = 0;
2587
2588 size_t      gc_heap::allocation_running_time;
2589
2590 size_t      gc_heap::allocation_running_amount;
2591
2592 heap_segment* gc_heap::ephemeral_heap_segment = 0;
2593
2594 BOOL        gc_heap::blocking_collection = FALSE;
2595
2596 heap_segment* gc_heap::freeable_large_heap_segment = 0;
2597
2598 size_t      gc_heap::time_bgc_last = 0;
2599
2600 size_t      gc_heap::mark_stack_tos = 0;
2601
2602 size_t      gc_heap::mark_stack_bos = 0;
2603
2604 size_t      gc_heap::mark_stack_array_length = 0;
2605
2606 mark*       gc_heap::mark_stack_array = 0;
2607
2608 BOOL        gc_heap::verify_pinned_queue_p = FALSE;
2609
2610 uint8_t*    gc_heap::oldest_pinned_plug = 0;
2611
2612 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
2613 size_t      gc_heap::num_pinned_objects = 0;
2614 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
2615
2616 #ifdef FEATURE_LOH_COMPACTION
2617 size_t      gc_heap::loh_pinned_queue_tos = 0;
2618
2619 size_t      gc_heap::loh_pinned_queue_bos = 0;
2620
2621 size_t      gc_heap::loh_pinned_queue_length = 0;
2622
2623 mark*       gc_heap::loh_pinned_queue = 0;
2624
2625 BOOL        gc_heap::loh_compacted_p = FALSE;
2626 #endif //FEATURE_LOH_COMPACTION
2627
2628 #ifdef BACKGROUND_GC
2629
2630 EEThreadId  gc_heap::bgc_thread_id;
2631
2632 uint8_t*    gc_heap::background_written_addresses [array_size+2];
2633
2634 heap_segment* gc_heap::freeable_small_heap_segment = 0;
2635
2636 size_t      gc_heap::bgc_overflow_count = 0;
2637
2638 size_t      gc_heap::bgc_begin_loh_size = 0;
2639 size_t      gc_heap::end_loh_size = 0;
2640
2641 uint32_t    gc_heap::bgc_alloc_spin_loh = 0;
2642
2643 size_t      gc_heap::bgc_loh_size_increased = 0;
2644
2645 size_t      gc_heap::bgc_loh_allocated_in_free = 0;
2646
2647 size_t      gc_heap::background_soh_alloc_count = 0;
2648
2649 size_t      gc_heap::background_loh_alloc_count = 0;
2650
2651 uint8_t**   gc_heap::background_mark_stack_tos = 0;
2652
2653 uint8_t**   gc_heap::background_mark_stack_array = 0;
2654
2655 size_t      gc_heap::background_mark_stack_array_length = 0;
2656
2657 uint8_t*    gc_heap::background_min_overflow_address =0;
2658
2659 uint8_t*    gc_heap::background_max_overflow_address =0;
2660
2661 BOOL        gc_heap::processed_soh_overflow_p = FALSE;
2662
2663 uint8_t*    gc_heap::background_min_soh_overflow_address =0;
2664
2665 uint8_t*    gc_heap::background_max_soh_overflow_address =0;
2666
2667 heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0;
2668
2669 uint8_t*    gc_heap::saved_sweep_ephemeral_start = 0;
2670
2671 heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0;
2672
2673 Thread*     gc_heap::bgc_thread = 0;
2674
2675 BOOL        gc_heap::expanded_in_fgc = FALSE;
2676
2677 uint8_t**   gc_heap::c_mark_list = 0;
2678
2679 size_t      gc_heap::c_mark_list_length = 0;
2680
2681 size_t      gc_heap::c_mark_list_index = 0;
2682
2683 gc_history_per_heap gc_heap::bgc_data_per_heap;
2684
2685 BOOL    gc_heap::bgc_thread_running;
2686
2687 CLRCriticalSection gc_heap::bgc_threads_timeout_cs;
2688
2689 GCEvent gc_heap::gc_lh_block_event;
2690
2691 #endif //BACKGROUND_GC
2692
2693 #ifdef MARK_LIST
2694 uint8_t**   gc_heap::mark_list;
2695 uint8_t**   gc_heap::mark_list_index;
2696 uint8_t**   gc_heap::mark_list_end;
2697 #endif //MARK_LIST
2698
2699 #ifdef SNOOP_STATS
2700 snoop_stats_data gc_heap::snoop_stat;
2701 #endif //SNOOP_STATS
2702
2703 uint8_t*    gc_heap::min_overflow_address = MAX_PTR;
2704
2705 uint8_t*    gc_heap::max_overflow_address = 0;
2706
2707 uint8_t*    gc_heap::shigh = 0;
2708
2709 uint8_t*    gc_heap::slow = MAX_PTR;
2710
2711 size_t      gc_heap::ordered_free_space_indices[MAX_NUM_BUCKETS];
2712
2713 size_t      gc_heap::saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
2714
2715 size_t      gc_heap::ordered_plug_indices[MAX_NUM_BUCKETS];
2716
2717 size_t      gc_heap::saved_ordered_plug_indices[MAX_NUM_BUCKETS];
2718
2719 BOOL        gc_heap::ordered_plug_indices_init = FALSE;
2720
2721 BOOL        gc_heap::use_bestfit = FALSE;
2722
2723 uint8_t*    gc_heap::bestfit_first_pin = 0;
2724
2725 BOOL        gc_heap::commit_end_of_seg = FALSE;
2726
2727 size_t      gc_heap::max_free_space_items = 0;
2728
2729 size_t      gc_heap::free_space_buckets = 0;
2730
2731 size_t      gc_heap::free_space_items = 0;
2732
2733 int         gc_heap::trimmed_free_space_index = 0;
2734
2735 size_t      gc_heap::total_ephemeral_plugs = 0;
2736
2737 seg_free_spaces* gc_heap::bestfit_seg = 0;
2738
2739 size_t      gc_heap::total_ephemeral_size = 0;
2740
2741 #ifdef HEAP_ANALYZE
2742
2743 size_t      gc_heap::internal_root_array_length = initial_internal_roots;
2744
2745 uint8_t**   gc_heap::internal_root_array = 0;
2746
2747 size_t      gc_heap::internal_root_array_index = 0;
2748
2749 BOOL        gc_heap::heap_analyze_success = TRUE;
2750
2751 uint8_t*    gc_heap::current_obj = 0;
2752 size_t      gc_heap::current_obj_size = 0;
2753
2754 #endif //HEAP_ANALYZE
2755
2756 #ifdef GC_CONFIG_DRIVEN
2757 size_t gc_heap::interesting_data_per_gc[max_idp_count];
2758 //size_t gc_heap::interesting_data_per_heap[max_idp_count];
2759 //size_t gc_heap::interesting_mechanisms_per_heap[max_im_count];
2760 #endif //GC_CONFIG_DRIVEN
2761 #endif //MULTIPLE_HEAPS
2762
2763 no_gc_region_info gc_heap::current_no_gc_region_info;
2764 BOOL gc_heap::proceed_with_gc_p = FALSE;
2765 GCSpinLock gc_heap::gc_lock;
2766
2767 size_t gc_heap::eph_gen_starts_size = 0;
2768 heap_segment* gc_heap::segment_standby_list;
2769 size_t        gc_heap::last_gc_index = 0;
2770 #ifdef SEG_MAPPING_TABLE
2771 size_t        gc_heap::min_segment_size = 0;
2772 size_t        gc_heap::min_segment_size_shr = 0;
2773 #endif //SEG_MAPPING_TABLE
2774 size_t        gc_heap::soh_segment_size = 0;
2775 size_t        gc_heap::min_loh_segment_size = 0;
2776 size_t        gc_heap::segment_info_size = 0;
2777
2778 #ifdef GC_CONFIG_DRIVEN
2779 size_t gc_heap::time_init = 0;
2780 size_t gc_heap::time_since_init = 0;
2781 size_t gc_heap::compact_or_sweep_gcs[2];
2782 #endif //GC_CONFIG_DRIVEN
2783
2784 #ifdef FEATURE_LOH_COMPACTION
2785 BOOL                   gc_heap::loh_compaction_always_p = FALSE;
2786 gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default;
2787 int                    gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY;
2788
2789 #endif //FEATURE_LOH_COMPACTION
2790
2791 GCEvent gc_heap::full_gc_approach_event;
2792
2793 GCEvent gc_heap::full_gc_end_event;
2794
2795 uint32_t gc_heap::fgn_maxgen_percent = 0;
2796
2797 uint32_t gc_heap::fgn_loh_percent = 0;
2798
2799 #ifdef BACKGROUND_GC
2800 BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE;
2801 #endif //BACKGROUND_GC
2802
2803 VOLATILE(bool) gc_heap::full_gc_approach_event_set;
2804
2805 size_t gc_heap::full_gc_counts[gc_type_max];
2806
2807 BOOL gc_heap::should_expand_in_full_gc = FALSE;
2808
2809 #ifdef HEAP_ANALYZE
2810 BOOL        gc_heap::heap_analyze_enabled = FALSE;
2811 #endif //HEAP_ANALYZE
2812
2813 #ifndef MULTIPLE_HEAPS
2814
2815 alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1];
2816 alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1];
2817
2818 dynamic_data gc_heap::dynamic_data_table [NUMBERGENERATIONS+1];
2819 gc_history_per_heap gc_heap::gc_data_per_heap;
2820 size_t gc_heap::maxgen_pinned_compact_before_advance = 0;
2821
2822 uint8_t* gc_heap::alloc_allocated = 0;
2823
2824 size_t gc_heap::allocation_quantum = CLR_SIZE;
2825
2826 GCSpinLock gc_heap::more_space_lock;
2827
2828 #ifdef SYNCHRONIZATION_STATS
2829 unsigned int gc_heap::good_suspension = 0;
2830 unsigned int gc_heap::bad_suspension = 0;
2831 uint64_t     gc_heap::total_msl_acquire = 0;
2832 unsigned int gc_heap::num_msl_acquired = 0;
2833 unsigned int gc_heap::num_high_msl_acquire = 0;
2834 unsigned int gc_heap::num_low_msl_acquire = 0;
2835 #endif //SYNCHRONIZATION_STATS
2836
2837 size_t   gc_heap::alloc_contexts_used = 0;
2838 size_t   gc_heap::soh_allocation_no_gc = 0;
2839 size_t   gc_heap::loh_allocation_no_gc = 0;
2840 bool     gc_heap::no_gc_oom_p = false;
2841 heap_segment* gc_heap::saved_loh_segment_no_gc = 0;
2842
2843 #endif //MULTIPLE_HEAPS
2844
2845 #ifndef MULTIPLE_HEAPS
2846
2847 BOOL        gc_heap::gen0_bricks_cleared = FALSE;
2848
2849 #ifdef FFIND_OBJECT
2850 int         gc_heap::gen0_must_clear_bricks = 0;
2851 #endif //FFIND_OBJECT
2852
2853 #ifdef FEATURE_PREMORTEM_FINALIZATION
2854 CFinalize*  gc_heap::finalize_queue = 0;
2855 #endif // FEATURE_PREMORTEM_FINALIZATION
2856
2857 generation gc_heap::generation_table [NUMBERGENERATIONS + 1];
2858
2859 size_t     gc_heap::interesting_data_per_heap[max_idp_count];
2860
2861 size_t     gc_heap::compact_reasons_per_heap[max_compact_reasons_count];
2862
2863 size_t     gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count];
2864
2865 size_t     gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
2866
2867 #endif // MULTIPLE_HEAPS
2868
2869 /* end of per heap static initialization */
2870
2871 /* end of static initialization */
2872
2873 #ifndef DACCESS_COMPILE
2874
2875 void gen_to_condemn_tuning::print (int heap_num)
2876 {
2877 #ifdef DT_LOG
2878     dprintf (DT_LOG_0, ("condemned reasons (%d %d)", condemn_reasons_gen, condemn_reasons_condition));
2879     dprintf (DT_LOG_0, ("%s", record_condemn_reasons_gen_header));
2880     gc_condemn_reason_gen r_gen;
2881     for (int i = 0; i < gcrg_max; i++)
2882     {
2883         r_gen = (gc_condemn_reason_gen)(i);
2884         str_reasons_gen[i * 2] = get_gen_char (get_gen (r_gen));
2885     }
2886     dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_gen));
2887
2888     dprintf (DT_LOG_0, ("%s", record_condemn_reasons_condition_header));
2889     gc_condemn_reason_condition r_condition;
2890     for (int i = 0; i < gcrc_max; i++)
2891     {
2892         r_condition = (gc_condemn_reason_condition)(i);
2893         str_reasons_condition[i * 2] = get_condition_char (get_condition (r_condition));
2894     }
2895
2896     dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition));
2897 #else
2898     UNREFERENCED_PARAMETER(heap_num);
2899 #endif //DT_LOG
2900 }
2901
2902 void gc_generation_data::print (int heap_num, int gen_num)
2903 {
2904 #if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
2905     dprintf (DT_LOG_0, ("[%2d]gen%d beg %Id fl %Id fo %Id end %Id fl %Id fo %Id in %Id p %Id np %Id alloc %Id",
2906                 heap_num, gen_num, 
2907                 size_before, 
2908                 free_list_space_before, free_obj_space_before,
2909                 size_after, 
2910                 free_list_space_after, free_obj_space_after, 
2911                 in, pinned_surv, npinned_surv,
2912                 new_allocation));
2913 #else
2914     UNREFERENCED_PARAMETER(heap_num);
2915     UNREFERENCED_PARAMETER(gen_num);
2916 #endif //SIMPLE_DPRINTF && DT_LOG
2917 }
2918
2919 void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, uint32_t value)
2920 {
2921     uint32_t* mechanism = &mechanisms[mechanism_per_heap];
2922     *mechanism = 0;
2923     *mechanism |= mechanism_mask;
2924     *mechanism |= (1 << value);
2925
2926 #ifdef DT_LOG
2927     gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap];
2928     dprintf (DT_LOG_0, ("setting %s: %s", 
2929             descr->name,
2930             (descr->descr)[value]));
2931 #endif //DT_LOG
2932 }
2933
2934 void gc_history_per_heap::print()
2935 {
2936 #if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
2937     for (int i = 0; i < (sizeof (gen_data)/sizeof (gc_generation_data)); i++)
2938     {
2939         gen_data[i].print (heap_index, i);
2940     }
2941
2942     dprintf (DT_LOG_0, ("fla %Id flr %Id esa %Id ca %Id pa %Id paa %Id, rfle %d, ec %Id", 
2943                     maxgen_size_info.free_list_allocated,
2944                     maxgen_size_info.free_list_rejected,
2945                     maxgen_size_info.end_seg_allocated,
2946                     maxgen_size_info.condemned_allocated,
2947                     maxgen_size_info.pinned_allocated,
2948                     maxgen_size_info.pinned_allocated_advance,
2949                     maxgen_size_info.running_free_list_efficiency,
2950                     extra_gen0_committed));
2951
2952     int mechanism = 0;
2953     gc_mechanism_descr* descr = 0;
2954
2955     for (int i = 0; i < max_mechanism_per_heap; i++)
2956     {
2957         mechanism = get_mechanism ((gc_mechanism_per_heap)i);
2958
2959         if (mechanism >= 0)
2960         {
2961             descr = &gc_mechanisms_descr[(gc_mechanism_per_heap)i];
2962             dprintf (DT_LOG_0, ("[%2d]%s%s", 
2963                         heap_index,
2964                         descr->name, 
2965                         (descr->descr)[mechanism]));
2966         }
2967     }
2968 #endif //SIMPLE_DPRINTF && DT_LOG
2969 }
2970
2971 void gc_history_global::print()
2972 {
2973 #ifdef DT_LOG
2974     char str_settings[64];
2975     memset (str_settings, '|', sizeof (char) * 64);
2976     str_settings[max_global_mechanisms_count*2] = 0;
2977
2978     for (int i = 0; i < max_global_mechanisms_count; i++)
2979     {
2980         str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 'Y' : 'N');
2981     }
2982
2983     dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|"));
2984
2985     dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings));
2986     dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d",
2987                         condemned_generation,
2988                         str_gc_reasons[reason],
2989                         str_gc_pause_modes[pause_mode],                        
2990                         final_youngest_desired,
2991                         gen0_reduction_count,
2992                         mem_pressure));
2993 #endif //DT_LOG
2994 }
2995
2996 void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num)
2997 {
2998     maxgen_size_increase* maxgen_size_info = &(current_gc_data_per_heap->maxgen_size_info);
2999     FIRE_EVENT(GCPerHeapHistory_V3, 
3000                (void *)(maxgen_size_info->free_list_allocated),
3001                (void *)(maxgen_size_info->free_list_rejected),                              
3002                (void *)(maxgen_size_info->end_seg_allocated),
3003                (void *)(maxgen_size_info->condemned_allocated),
3004                (void *)(maxgen_size_info->pinned_allocated),
3005                (void *)(maxgen_size_info->pinned_allocated_advance),
3006                maxgen_size_info->running_free_list_efficiency,
3007                current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(),
3008                current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(),
3009                current_gc_data_per_heap->mechanisms[gc_heap_compact],
3010                current_gc_data_per_heap->mechanisms[gc_heap_expand],
3011                current_gc_data_per_heap->heap_index,
3012                (void *)(current_gc_data_per_heap->extra_gen0_committed),
3013                (max_generation + 2),
3014                (uint32_t)(sizeof (gc_generation_data)),
3015                (void *)&(current_gc_data_per_heap->gen_data[0]));
3016
3017     current_gc_data_per_heap->print();
3018     current_gc_data_per_heap->gen_to_condemn_reasons.print (heap_num);
3019 }
3020
3021 void gc_heap::fire_pevents()
3022 {
3023 #ifndef CORECLR
3024     settings.record (&gc_data_global);
3025     gc_data_global.print();
3026
3027     FIRE_EVENT(GCGlobalHeapHistory_V2, gc_data_global.final_youngest_desired, 
3028                                   gc_data_global.num_heaps, 
3029                                   gc_data_global.condemned_generation, 
3030                                   gc_data_global.gen0_reduction_count, 
3031                                   gc_data_global.reason, 
3032                                   gc_data_global.global_mechanims_p, 
3033                                   gc_data_global.pause_mode, 
3034                                   gc_data_global.mem_pressure);
3035
3036 #ifdef MULTIPLE_HEAPS
3037     for (int i = 0; i < gc_heap::n_heaps; i++)
3038     {
3039         gc_heap* hp = gc_heap::g_heaps[i];
3040         gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
3041         fire_per_heap_hist_event (current_gc_data_per_heap, hp->heap_number);
3042     }
3043 #else
3044     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
3045     fire_per_heap_hist_event (current_gc_data_per_heap, heap_number);
3046 #endif    
3047 #endif //!CORECLR
3048 }
3049
3050 inline BOOL
3051 gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp)
3052 {
3053     BOOL ret = FALSE;
3054
3055     switch (tp)
3056     {
3057         case tuning_deciding_condemned_gen:
3058         case tuning_deciding_compaction:
3059         case tuning_deciding_expansion:
3060         case tuning_deciding_full_gc:
3061         {
3062             ret = (!ephemeral_gen_fit_p (tp));
3063             break;
3064         }
3065         case tuning_deciding_promote_ephemeral:
3066         {
3067             size_t new_gen0size = approximate_new_allocation();
3068             ptrdiff_t plan_ephemeral_size = total_ephemeral_size;
3069             
3070             dprintf (GTC_LOG, ("h%d: plan eph size is %Id, new gen0 is %Id", 
3071                 heap_number, plan_ephemeral_size, new_gen0size));
3072
3073             // If we were in no_gc_region we could have allocated a larger than normal segment,
3074             // and the next seg we allocate will be a normal sized seg so if we can't fit the new
3075             // ephemeral generations there, do an ephemeral promotion.
3076             ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size));
3077
3078             break;
3079         }
3080         default:
3081             break;
3082     }
3083
3084     return ret;
3085 }
3086
3087 BOOL 
3088 gc_heap::dt_high_frag_p (gc_tuning_point tp, 
3089                          int gen_number, 
3090                          BOOL elevate_p)
3091 {
3092     BOOL ret = FALSE;
3093
3094     switch (tp)
3095     {
3096         case tuning_deciding_condemned_gen:
3097         {
3098             dynamic_data* dd = dynamic_data_of (gen_number);
3099             float fragmentation_burden = 0;
3100
3101             if (elevate_p)
3102             {
3103                 ret = (dd_fragmentation (dynamic_data_of (max_generation)) >= dd_max_size(dd));
3104                 dprintf (GTC_LOG, ("h%d: frag is %Id, max size is %Id",
3105                     heap_number, dd_fragmentation (dd), dd_max_size(dd)));
3106             }
3107             else
3108             {
3109 #ifndef MULTIPLE_HEAPS
3110                 if (gen_number == max_generation)
3111                 {
3112                     float frag_ratio = (float)(dd_fragmentation (dynamic_data_of (max_generation))) / (float)generation_size (max_generation);
3113                     if (frag_ratio > 0.65)
3114                     {
3115                         dprintf (GTC_LOG, ("g2 FR: %d%%", (int)(frag_ratio*100)));
3116                         return TRUE;
3117                     }
3118                 }
3119 #endif //!MULTIPLE_HEAPS
3120                 size_t fr = generation_unusable_fragmentation (generation_of (gen_number));
3121                 ret = (fr > dd_fragmentation_limit(dd));
3122                 if (ret)
3123                 {
3124                     fragmentation_burden = (float)fr / generation_size (gen_number);
3125                     ret = (fragmentation_burden > dd_v_fragmentation_burden_limit (dd));
3126                 }
3127                 dprintf (GTC_LOG, ("h%d: gen%d, frag is %Id, alloc effi: %d%%, unusable frag is %Id, ratio is %d",
3128                     heap_number, gen_number, dd_fragmentation (dd), 
3129                     (int)(100*generation_allocator_efficiency (generation_of (gen_number))),
3130                     fr, (int)(fragmentation_burden*100)));
3131             }
3132             break;
3133         }
3134         default:
3135             break;
3136     }
3137
3138     return ret;
3139 }
3140
3141 inline BOOL 
3142 gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
3143 {
3144     BOOL ret = FALSE;
3145
3146     switch (tp)
3147     {
3148         case tuning_deciding_condemned_gen:
3149         {
3150             if (gen_number == max_generation)
3151             {
3152                 dynamic_data* dd = dynamic_data_of (gen_number);
3153                 size_t maxgen_allocated = (dd_desired_allocation (dd) - dd_new_allocation (dd));
3154                 size_t maxgen_total_size = maxgen_allocated + dd_current_size (dd);
3155                 size_t est_maxgen_surv = (size_t)((float) (maxgen_total_size) * dd_surv (dd));
3156                 size_t est_maxgen_free = maxgen_total_size - est_maxgen_surv + dd_fragmentation (dd);
3157
3158                 dprintf (GTC_LOG, ("h%d: Total gen2 size: %Id, est gen2 dead space: %Id (s: %d, allocated: %Id), frag: %Id",
3159                             heap_number,
3160                             maxgen_total_size,
3161                             est_maxgen_free, 
3162                             (int)(dd_surv (dd) * 100),
3163                             maxgen_allocated,
3164                             dd_fragmentation (dd)));
3165
3166                 uint32_t num_heaps = 1;
3167
3168 #ifdef MULTIPLE_HEAPS
3169                 num_heaps = gc_heap::n_heaps;
3170 #endif //MULTIPLE_HEAPS
3171
3172                 size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps);
3173                 dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th));
3174                 ret = (est_maxgen_free >= min_frag_th);
3175             }
3176             else
3177             {
3178                 assert (0);
3179             }
3180             break;
3181         }
3182
3183         default:
3184             break;
3185     }
3186
3187     return ret;
3188 }
3189
3190 // DTREVIEW: Right now we only estimate gen2 fragmentation.
3191 // On 64-bit though we should consider gen1 or even gen0 fragmentation as
3192 // well.
3193 inline BOOL 
3194 gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem)
3195 {
3196     BOOL ret = FALSE;
3197
3198     switch (tp)
3199     {
3200         case tuning_deciding_condemned_gen:
3201         {
3202             if (gen_number == max_generation)
3203             {
3204                 dynamic_data* dd = dynamic_data_of (gen_number);
3205                 float est_frag_ratio = 0;
3206                 if (dd_current_size (dd) == 0)
3207                 {
3208                     est_frag_ratio = 1;
3209                 }
3210                 else if ((dd_fragmentation (dd) == 0) || (dd_fragmentation (dd) + dd_current_size (dd) == 0))
3211                 {
3212                     est_frag_ratio = 0;
3213                 }
3214                 else
3215                 {
3216                     est_frag_ratio = (float)dd_fragmentation (dd) / (float)(dd_fragmentation (dd) + dd_current_size (dd));
3217                 }
3218                 
3219                 size_t est_frag = (dd_fragmentation (dd) + (size_t)((dd_desired_allocation (dd) - dd_new_allocation (dd)) * est_frag_ratio));
3220                 dprintf (GTC_LOG, ("h%d: gen%d: current_size is %Id, frag is %Id, est_frag_ratio is %d%%, estimated frag is %Id", 
3221                     heap_number,
3222                     gen_number,
3223                     dd_current_size (dd),
3224                     dd_fragmentation (dd),
3225                     (int)(est_frag_ratio*100),
3226                     est_frag));
3227
3228                 uint32_t num_heaps = 1;
3229
3230 #ifdef MULTIPLE_HEAPS
3231                 num_heaps = gc_heap::n_heaps;
3232 #endif //MULTIPLE_HEAPS
3233                 uint64_t min_frag_th = min_high_fragmentation_threshold(available_mem, num_heaps);
3234                 //dprintf (GTC_LOG, ("h%d, min frag is %I64d", heap_number, min_frag_th));
3235                 ret = (est_frag >= min_frag_th);
3236             }
3237             else
3238             {
3239                 assert (0);
3240             }
3241             break;
3242         }
3243
3244         default:
3245             break;
3246     }
3247
3248     return ret;
3249 }
3250
3251 inline BOOL 
3252 gc_heap::dt_low_card_table_efficiency_p (gc_tuning_point tp)
3253 {
3254     BOOL ret = FALSE;
3255
3256     switch (tp)
3257     {
3258     case tuning_deciding_condemned_gen:
3259     {
3260         /* promote into max generation if the card table has too many
3261         * generation faults besides the n -> 0 ones
3262         */
3263         ret = (generation_skip_ratio < 30);
3264         break;
3265     }
3266
3267     default:
3268         break;
3269     }
3270
3271     return ret;
3272 }
3273
3274 inline BOOL
3275 in_range_for_segment(uint8_t* add, heap_segment* seg)
3276 {
3277     return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
3278 }
3279
3280 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
3281 // The array we allocate is organized as follows:
3282 // 0th element is the address of the last array we allocated.
3283 // starting from the 1st element are the segment addresses, that's
3284 // what buckets() returns.
3285 struct bk
3286 {
3287     uint8_t* add;
3288     size_t val;
3289 };
3290
3291 class sorted_table
3292 {
3293 private:
3294     ptrdiff_t size;
3295     ptrdiff_t count;
3296     bk* slots;
3297     bk* buckets() { return (slots + 1); }
3298     uint8_t*& last_slot (bk* arr) { return arr[0].add; }
3299     bk* old_slots;
3300 public:
3301     static  sorted_table* make_sorted_table ();
3302     BOOL    insert (uint8_t* add, size_t val);
3303     size_t  lookup (uint8_t*& add);
3304     void    remove (uint8_t* add);
3305     void    clear ();
3306     void    delete_sorted_table();
3307     void    delete_old_slots();
3308     void    enqueue_old_slot(bk* sl);
3309     BOOL    ensure_space_for_insert();
3310 };
3311
3312 sorted_table*
3313 sorted_table::make_sorted_table ()
3314 {
3315     size_t size = 400;
3316
3317     // allocate one more bk to store the older slot address.
3318     sorted_table* res = (sorted_table*)new char [sizeof (sorted_table) + (size + 1) * sizeof (bk)];
3319     if (!res)
3320         return 0;
3321     res->size = size;
3322     res->slots = (bk*)(res + 1);
3323     res->old_slots = 0;
3324     res->clear();
3325     return res;
3326 }
3327
3328 void
3329 sorted_table::delete_sorted_table()
3330 {
3331     if (slots != (bk*)(this+1))
3332     {
3333         delete slots;
3334     }
3335     delete_old_slots();
3336     delete this;
3337 }
3338 void
3339 sorted_table::delete_old_slots()
3340 {
3341     uint8_t* sl = (uint8_t*)old_slots;
3342     while (sl)
3343     {
3344         uint8_t* dsl = sl;
3345         sl = last_slot ((bk*)sl);
3346         delete dsl;
3347     }
3348     old_slots = 0;
3349 }
3350 void
3351 sorted_table::enqueue_old_slot(bk* sl)
3352 {
3353     last_slot (sl) = (uint8_t*)old_slots;
3354     old_slots = sl;
3355 }
3356
3357 inline
3358 size_t
3359 sorted_table::lookup (uint8_t*& add)
3360 {
3361     ptrdiff_t high = (count-1);
3362     ptrdiff_t low = 0;
3363     ptrdiff_t ti;
3364     ptrdiff_t mid;
3365     bk* buck = buckets();
3366     while (low <= high)
3367     {
3368         mid = ((low + high)/2);
3369         ti = mid;
3370         if (buck[ti].add > add)
3371         {
3372             if ((ti > 0) && (buck[ti-1].add <= add))
3373             {
3374                 add = buck[ti-1].add;
3375                 return buck[ti - 1].val;
3376             }
3377             high = mid - 1;
3378         }
3379         else
3380         {
3381             if (buck[ti+1].add > add)
3382             {
3383                 add = buck[ti].add;
3384                 return buck[ti].val;
3385             }
3386             low = mid + 1;
3387         }
3388     }
3389     add = 0;
3390     return 0;
3391 }
3392
3393 BOOL
3394 sorted_table::ensure_space_for_insert()
3395 {
3396     if (count == size)
3397     {
3398         size = (size * 3)/2;
3399         assert((size * sizeof (bk)) > 0);
3400         bk* res = (bk*)new (nothrow) char [(size + 1) * sizeof (bk)];
3401         assert (res);
3402         if (!res)
3403             return FALSE;
3404
3405         last_slot (res) = 0;
3406         memcpy (((bk*)res + 1), buckets(), count * sizeof (bk));
3407         bk* last_old_slots = slots;
3408         slots = res;
3409         if (last_old_slots != (bk*)(this + 1))
3410             enqueue_old_slot (last_old_slots);
3411     }
3412     return TRUE;
3413 }
3414
3415 BOOL
3416 sorted_table::insert (uint8_t* add, size_t val)
3417 {
3418     //the caller must have made room already (see ensure_space_for_insert)
3419     assert (count < size);
3420
3421     //insert sorted
3422     ptrdiff_t high = (count-1);
3423     ptrdiff_t low = 0;
3424     ptrdiff_t ti;
3425     ptrdiff_t mid;
3426     bk* buck = buckets();
3427     while (low <= high)
3428     {
3429         mid = ((low + high)/2);
3430         ti = mid;
3431         if (buck[ti].add > add)
3432         {
3433             if ((ti == 0) || (buck[ti-1].add <= add))
3434             {
3435                 // found insertion point
3436                 for (ptrdiff_t k = count; k > ti;k--)
3437                 {
3438                     buck [k] = buck [k-1];
3439                 }
3440                 buck[ti].add = add;
3441                 buck[ti].val = val;
3442                 count++;
3443                 return TRUE;
3444             }
3445             high = mid - 1;
3446         }
3447         else
3448         {
3449             if (buck[ti+1].add > add)
3450             {
3451                 //found the insertion point
3452                 for (ptrdiff_t k = count; k > ti+1;k--)
3453                 {
3454                     buck [k] = buck [k-1];
3455                 }
3456                 buck[ti+1].add = add;
3457                 buck[ti+1].val = val;
3458                 count++;
3459                 return TRUE;
3460             }
3461             low = mid + 1;
3462         }
3463     }
3464     assert (0);
3465     return TRUE;
3466 }
3467
3468 void
3469 sorted_table::remove (uint8_t* add)
3470 {
3471     ptrdiff_t high = (count-1);
3472     ptrdiff_t low = 0;
3473     ptrdiff_t ti;
3474     ptrdiff_t mid;
3475     bk* buck = buckets();
3476     while (low <= high)
3477     {
3478         mid = ((low + high)/2);
3479         ti = mid;
3480         if (buck[ti].add > add)
3481         {
3482             if (buck[ti-1].add <= add)
3483             {
3484                 // found the guy to remove
3485                 for (ptrdiff_t k = ti; k < count; k++)
3486                     buck[k-1] = buck[k];
3487                 count--;
3488                 return;
3489             }
3490             high = mid - 1;
3491         }
3492         else
3493         {
3494             if (buck[ti+1].add > add)
3495             {
3496                 // found the guy to remove
3497                 for (ptrdiff_t k = ti+1; k < count; k++)
3498                     buck[k-1] = buck[k];
3499                 count--;
3500                 return;
3501             }
3502             low = mid + 1;
3503         }
3504     }
3505     assert (0);
3506 }
3507
3508 void
3509 sorted_table::clear()
3510 {
3511     count = 1;
3512     buckets()[0].add = MAX_PTR;
3513 }
3514 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
3515
3516 #ifdef SEG_MAPPING_TABLE
3517 #ifdef GROWABLE_SEG_MAPPING_TABLE
3518 inline
3519 uint8_t* align_on_segment (uint8_t* add)
3520 {
3521     return (uint8_t*)((size_t)(add + (gc_heap::min_segment_size - 1)) & ~(gc_heap::min_segment_size - 1));
3522 }
3523
3524 inline
3525 uint8_t* align_lower_segment (uint8_t* add)
3526 {
3527     return (uint8_t*)((size_t)(add) & ~(gc_heap::min_segment_size - 1));
3528 }
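// Illustration only, assuming gc_heap::min_segment_size is 4MB (0x400000):
//   align_on_segment    (0x20345678) == 0x20400000
//   align_lower_segment (0x20345678) == 0x20000000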
3529
3530 size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end)
3531 {
3532     from = align_lower_segment (from);
3533     end = align_on_segment (end);
3534     dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr))));
3535     return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr);
3536 }
3537
3538 // for seg_mapping_table we want it to start at a pointer-size aligned address.
3539 inline
3540 size_t align_for_seg_mapping_table (size_t size)
3541 {
3542     return ((size + (sizeof (uint8_t*) - 1)) &~ (sizeof (uint8_t*) - 1));
3543 }
3544
3545 inline
3546 size_t seg_mapping_word_of (uint8_t* add)
3547 {
3548     return (size_t)add >> gc_heap::min_segment_size_shr;
3549 }
3550 #else //GROWABLE_SEG_MAPPING_TABLE
3551 BOOL seg_mapping_table_init()
3552 {
3553 #ifdef BIT64
3554     uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024;
3555 #else
3556     uint64_t total_address_space = (uint64_t)4*1024*1024*1024;
3557 #endif // BIT64
3558
3559     size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr);
3560     seg_mapping_table = new seg_mapping[num_entries];
3561
3562     if (seg_mapping_table)
3563     {
3564         memset (seg_mapping_table, 0, num_entries * sizeof (seg_mapping));
3565         dprintf (1, ("created %d entries for heap mapping (%Id bytes)", 
3566                      num_entries, (num_entries * sizeof (seg_mapping))));
3567         return TRUE;
3568     }
3569     else
3570     {
3571         dprintf (1, ("failed to create %d entries for heap mapping (%Id bytes)", 
3572                      num_entries, (num_entries * sizeof (seg_mapping))));
3573         return FALSE;
3574     }
3575 }
3576 #endif //GROWABLE_SEG_MAPPING_TABLE
3577
3578 #ifdef FEATURE_BASICFREEZE
3579 inline
3580 size_t ro_seg_begin_index (heap_segment* seg)
3581 {
3582     size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3583     begin_index = max (begin_index, (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr);
3584     return begin_index;
3585 }
3586
3587 inline
3588 size_t ro_seg_end_index (heap_segment* seg)
3589 {
3590     size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) >> gc_heap::min_segment_size_shr;
3591     end_index = min (end_index, (size_t)g_gc_highest_address >> gc_heap::min_segment_size_shr);
3592     return end_index;
3593 }
3594
3595 void seg_mapping_table_add_ro_segment (heap_segment* seg)
3596 {
3597 #ifdef GROWABLE_SEG_MAPPING_TABLE
3598     if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
3599         return;
3600 #endif //GROWABLE_SEG_MAPPING_TABLE
3601
3602     for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++)
3603         seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry);
3604 }
3605
3606 void seg_mapping_table_remove_ro_segment (heap_segment* seg)
3607 {
3608     UNREFERENCED_PARAMETER(seg);
3609 #if 0
3610 // POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves
3611 // to be a perf problem, we can search in the current ro segs and see if any lands in this range and only
3612 // remove the flag if none lands in this range.
3613 #endif //0
3614 }
3615
3616 heap_segment* ro_segment_lookup (uint8_t* o)
3617 {
3618     uint8_t* ro_seg_start = o;
3619     heap_segment* seg = (heap_segment*)gc_heap::seg_table->lookup (ro_seg_start);
3620
3621     if (ro_seg_start && in_range_for_segment (o, seg))
3622         return seg;
3623     else
3624         return 0;
3625 }
3626
3627 #endif //FEATURE_BASICFREEZE
3628
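// A segment usually spans several seg_mapping_table entries: the entry containing the segment's
// start records it in seg1 (and its heap in h1 for server GC), the entry containing its last
// reserved byte records it in seg0/h0 and sets boundary to that byte, and every entry fully
// covered in between gets seg1/h1 as well.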
3629 void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp)
3630 {
3631     size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
3632     size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3633     seg_mapping* begin_entry = &seg_mapping_table[begin_index];
3634     size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
3635     seg_mapping* end_entry = &seg_mapping_table[end_index];
3636
3637     dprintf (1, ("adding seg %Ix(%d)-%Ix(%d)", 
3638         seg, begin_index, heap_segment_reserved (seg), end_index));
3639
3640     dprintf (1, ("before add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", 
3641         begin_index, (seg_mapping_table[begin_index].boundary + 1),
3642         end_index, (seg_mapping_table[end_index].boundary + 1)));
3643
3644 #ifdef MULTIPLE_HEAPS
3645 #ifdef SIMPLE_DPRINTF
3646     dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end %d: h0: %Ix(%d), h1: %Ix(%d)",
3647         begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
3648         (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
3649         end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
3650         (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
3651 #endif //SIMPLE_DPRINTF
3652     assert (end_entry->boundary == 0);
3653     assert (end_entry->h0 == 0);
3654     end_entry->h0 = hp;
3655     assert (begin_entry->h1 == 0);
3656     begin_entry->h1 = hp;
3657 #else
3658     UNREFERENCED_PARAMETER(hp);
3659 #endif //MULTIPLE_HEAPS
3660
3661     end_entry->boundary = (uint8_t*)seg_end;
3662
3663     dprintf (1, ("set entry %d seg1 and %d seg0 to %Ix", begin_index, end_index, seg));
3664     assert ((begin_entry->seg1 == 0) || ((size_t)(begin_entry->seg1) == ro_in_entry));
3665     begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) | (size_t)seg);
3666     end_entry->seg0 = seg;
3667
3668     // for every entry in between we need to set its heap too.
3669     for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
3670     {
3671         assert (seg_mapping_table[entry_index].boundary == 0);
3672 #ifdef MULTIPLE_HEAPS
3673         assert (seg_mapping_table[entry_index].h0 == 0);
3674         seg_mapping_table[entry_index].h1 = hp;
3675 #endif //MULTIPLE_HEAPS
3676         seg_mapping_table[entry_index].seg1 = seg;
3677     }
3678
3679     dprintf (1, ("after add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", 
3680         begin_index, (seg_mapping_table[begin_index].boundary + 1),
3681         end_index, (seg_mapping_table[end_index].boundary + 1)));
3682 #if defined(MULTIPLE_HEAPS) && defined(SIMPLE_DPRINTF)
3683     dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end: %d h0: %Ix(%d), h1: %Ix(%d)",
3684         begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
3685         (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
3686         end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
3687         (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
3688 #endif //MULTIPLE_HEAPS && SIMPLE_DPRINTF
3689 }
3690
3691 void gc_heap::seg_mapping_table_remove_segment (heap_segment* seg)
3692 {
3693     size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
3694     size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3695     seg_mapping* begin_entry = &seg_mapping_table[begin_index];
3696     size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
3697     seg_mapping* end_entry = &seg_mapping_table[end_index];
3698     dprintf (1, ("removing seg %Ix(%d)-%Ix(%d)", 
3699         seg, begin_index, heap_segment_reserved (seg), end_index));
3700
3701     assert (end_entry->boundary == (uint8_t*)seg_end);
3702     end_entry->boundary = 0;
3703
3704 #ifdef MULTIPLE_HEAPS
3705     gc_heap* hp = heap_segment_heap (seg);
3706     assert (end_entry->h0 == hp);
3707     end_entry->h0 = 0;
3708     assert (begin_entry->h1 == hp);
3709     begin_entry->h1 = 0;
3710 #endif //MULTIPLE_HEAPS
3711
3712     assert (begin_entry->seg1 != 0);
3713     begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) & ro_in_entry);
3714     end_entry->seg0 = 0;
3715
3716     // for every entry in between we need to reset its heap too.
3717     for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
3718     {
3719         assert (seg_mapping_table[entry_index].boundary == 0);
3720 #ifdef MULTIPLE_HEAPS
3721         assert (seg_mapping_table[entry_index].h0 == 0);
3722         assert (seg_mapping_table[entry_index].h1 == hp);
3723         seg_mapping_table[entry_index].h1 = 0;
3724 #endif //MULTIPLE_HEAPS
3725         seg_mapping_table[entry_index].seg1 = 0;
3726     }
3727
3728     dprintf (1, ("after remove: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix", 
3729         begin_index, (seg_mapping_table[begin_index].boundary + 1),
3730         end_index, (seg_mapping_table[end_index].boundary + 1)));
3731 #ifdef MULTIPLE_HEAPS
3732     dprintf (1, ("begin %d: h0: %Ix, h1: %Ix; end: %d h0: %Ix, h1: %Ix",
3733         begin_index, (uint8_t*)(begin_entry->h0), (uint8_t*)(begin_entry->h1),
3734         end_index, (uint8_t*)(end_entry->h0), (uint8_t*)(end_entry->h1)));
3735 #endif //MULTIPLE_HEAPS
3736 }
3737
3738 #ifdef MULTIPLE_HEAPS
3739 inline
3740 gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
3741 {
3742     size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
3743     seg_mapping* entry = &seg_mapping_table[index];
3744
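    // boundary is the last byte of the segment that ends inside this entry, so objects above
    // it belong to the segment that starts here (h1/seg1); otherwise they belong to h0/seg0.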
3745     gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0);
3746
3747     dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix",
3748         o, index, (entry->boundary + 1), 
3749         (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0),
3750         (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1)));
3751
3752 #ifdef _DEBUG
3753     heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
3754 #ifdef FEATURE_BASICFREEZE
3755     if ((size_t)seg & ro_in_entry)
3756         seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
3757 #endif //FEATURE_BASICFREEZE
3758
3759     if (seg)
3760     {
3761         if (in_range_for_segment (o, seg))
3762         {
3763             dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, seg, (uint8_t*)heap_segment_allocated (seg)));
3764         }
3765         else
3766         {
3767             dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg", 
3768                 seg, (uint8_t*)heap_segment_allocated (seg), o));
3769         }
3770     }
3771     else
3772     {
3773         dprintf (2, ("could not find obj %Ix in any existing segments", o));
3774     }
3775 #endif //_DEBUG
3776
3777     return hp;
3778 }
3779
3780 gc_heap* seg_mapping_table_heap_of (uint8_t* o)
3781 {
3782 #ifdef GROWABLE_SEG_MAPPING_TABLE
3783     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3784         return 0;
3785 #endif //GROWABLE_SEG_MAPPING_TABLE
3786
3787     return seg_mapping_table_heap_of_worker (o);
3788 }
3789
3790 gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
3791 {
3792 #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
3793     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3794         return 0;
3795 #endif //FEATURE_BASICFREEZE && GROWABLE_SEG_MAPPING_TABLE
3796
3797     return seg_mapping_table_heap_of_worker (o);
3798 }
3799 #endif //MULTIPLE_HEAPS
3800
3801 // Only returns a valid seg if we can actually find o on the seg.
3802 heap_segment* seg_mapping_table_segment_of (uint8_t* o)
3803 {
3804 #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
3805     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3806 #ifdef FEATURE_BASICFREEZE
3807         return ro_segment_lookup (o);
3808 #else
3809         return 0;
3810 #endif //FEATURE_BASICFREEZE
3811 #endif //FEATURE_BASICFREEZE && GROWABLE_SEG_MAPPING_TABLE
3812
3813     size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
3814     seg_mapping* entry = &seg_mapping_table[index];
3815
3816     dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix",
3817         o, index, (entry->boundary + 1), 
3818         (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1)));
3819
3820     heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
3821 #ifdef FEATURE_BASICFREEZE
3822     if ((size_t)seg & ro_in_entry)
3823         seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
3824 #endif //FEATURE_BASICFREEZE
3825
3826     if (seg)
3827     {
3828         if (in_range_for_segment (o, seg))
3829         {
3830             dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg)));
3831         }
3832         else
3833         {
3834             dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg, setting it to 0", 
3835                 (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg), o));
3836             seg = 0;
3837         }
3838     }
3839     else
3840     {
3841         dprintf (2, ("could not find obj %Ix in any existing segments", o));
3842     }
3843
3844 #ifdef FEATURE_BASICFREEZE
3845     // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro 
3846     // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range.  I.e., it had an 
3847     // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression.  However, at the moment, grow_brick_card_table does 
3848     // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest) 
3849     // range changes.  We should probably go ahead and modify grow_brick_card_table and put back the 
3850     // "&& (size_t)(entry->seg1) & ro_in_entry" here.
3851     if (!seg)
3852     {
3853         seg = ro_segment_lookup (o);
3854         if (seg && !in_range_for_segment (o, seg))
3855             seg = 0;
3856     }
3857 #endif //FEATURE_BASICFREEZE
3858
3859     return seg;
3860 }
3861 #endif //SEG_MAPPING_TABLE
3862
3863 size_t gcard_of ( uint8_t*);
3864
3865 #define memref(i) *(uint8_t**)(i)
3866
3867 //GC Flags
3868 #define GC_MARKED       (size_t)0x1
3869 #define slot(i, j) ((uint8_t**)(i))[j+1]
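// GC_MARKED lives in the low bit of the object's MethodTable pointer; method tables are at
// least pointer aligned, so the bit is otherwise always zero (see GetMethodTable/SetMarked below).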
3870
3871 #define free_object_base_size (plug_skew + sizeof(ArrayBase))
3872
3873 class CObjectHeader : public Object
3874 {
3875 public:
3876
3877 #if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
3878     // The GC expects the following methods that are provided by the Object class in the CLR but not provided
3879     // by Redhawk's version of Object.
3880     uint32_t GetNumComponents()
3881     {
3882         return ((ArrayBase *)this)->GetNumComponents();
3883     }
3884
3885     void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
3886     {
3887         UNREFERENCED_PARAMETER(bVerifyNextHeader);
3888
3889         if (this == NULL)
3890             return;
3891
3892         MethodTable * pMT = GetMethodTable();
3893
3894         _ASSERTE(pMT->SanityCheck());
3895
3896         bool noRangeChecks =
3897             (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS;
3898
3899         BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
3900         if (!noRangeChecks)
3901         {
3902             fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE);
3903             if (!fSmallObjectHeapPtr)
3904                 fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this);
3905
3906             _ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
3907         }
3908
3909 #ifdef FEATURE_STRUCTALIGN
3910         _ASSERTE(IsStructAligned((uint8_t *)this, GetMethodTable()->GetBaseAlignment()));
3911 #endif // FEATURE_STRUCTALIGN
3912
3913 #ifdef FEATURE_64BIT_ALIGNMENT
3914         if (pMT->RequiresAlign8())
3915         {
3916             _ASSERTE((((size_t)this) & 0x7) == (pMT->IsValueType() ? 4U : 0U));
3917         }
3918 #endif // FEATURE_64BIT_ALIGNMENT
3919
3920 #ifdef VERIFY_HEAP
3921         if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
3922             g_theGCHeap->ValidateObjectMember(this);
3923 #endif
3924         if (fSmallObjectHeapPtr)
3925         {
3926 #ifdef FEATURE_BASICFREEZE
3927             _ASSERTE(!g_theGCHeap->IsLargeObject(pMT) || g_theGCHeap->IsInFrozenSegment(this));
3928 #else
3929             _ASSERTE(!g_theGCHeap->IsLargeObject(pMT));
3930 #endif
3931         }
3932     }
3933
3934     void ValidatePromote(ScanContext *sc, uint32_t flags)
3935     {
3936         UNREFERENCED_PARAMETER(sc);
3937         UNREFERENCED_PARAMETER(flags);
3938
3939         Validate();
3940     }
3941
3942     void ValidateHeap(Object *from, BOOL bDeep)
3943     {
3944         UNREFERENCED_PARAMETER(from);
3945
3946         Validate(bDeep, FALSE);
3947     }
3948
3949     ADIndex GetAppDomainIndex()
3950     {
3951         return (ADIndex)RH_DEFAULT_DOMAIN_ID;
3952     }
3953 #endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE
3954
3955     /////
3956     //
3957     // Header Status Information
3958     //
3959
3960     MethodTable    *GetMethodTable() const
3961     {
3962         return( (MethodTable *) (((size_t) RawGetMethodTable()) & (~(GC_MARKED))));
3963     }
3964
3965     void SetMarked()
3966     {
3967         RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | GC_MARKED));
3968     }
3969
3970     BOOL IsMarked() const
3971     {
3972         return !!(((size_t)RawGetMethodTable()) & GC_MARKED);
3973     }
3974
3975     void SetPinned()
3976     {
3977         assert (!(gc_heap::settings.concurrent));
3978         GetHeader()->SetGCBit();
3979     }
3980
3981     BOOL IsPinned() const
3982     {
3983         return !!((((CObjectHeader*)this)->GetHeader()->GetBits()) & BIT_SBLK_GC_RESERVE);
3984     }
3985
3986     void ClearMarked()
3987     {
3988         RawSetMethodTable( GetMethodTable() );
3989     }
3990
3991     CGCDesc *GetSlotMap ()
3992     {
3993         assert (GetMethodTable()->ContainsPointers());
3994         return CGCDesc::GetCGCDescFromMT(GetMethodTable());
3995     }
3996
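    // Turns this memory range into a free object: a fake array whose method table is
    // g_gc_pFreeObjectMethodTable and whose component count encodes the leftover size,
    // so heap walkers can step over it.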
3997     void SetFree(size_t size)
3998     {
3999         assert (size >= free_object_base_size);
4000
4001         assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size);
4002         assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1);
4003
4004         RawSetMethodTable( g_gc_pFreeObjectMethodTable );
4005
4006         size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()];
4007         *numComponentsPtr = size - free_object_base_size;
4008 #ifdef VERIFY_HEAP
4009         //Clearing the sync block here introduces a bug in the free list management, so the next line stays disabled:
4010         //((void**) this)[-1] = 0;    // clear the sync block,
4011         assert (*numComponentsPtr >= 0);
4012         if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
4013             memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr);
4014 #endif //VERIFY_HEAP
4015     }
4016
4017     void UnsetFree()
4018     {
4019         size_t size = free_object_base_size - plug_skew;
4020
4021         // since we only need to clear 2 pointer-sized words, we do it manually
4022         PTR_PTR m = (PTR_PTR) this;
4023         for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
4024             *(m++) = 0;
4025     }
4026
4027     BOOL IsFree () const
4028     {
4029         return (GetMethodTable() == g_gc_pFreeObjectMethodTable);
4030     }
4031
4032 #ifdef FEATURE_STRUCTALIGN
4033     int GetRequiredAlignment () const
4034     {
4035         return GetMethodTable()->GetRequiredAlignment();
4036     }
4037 #endif // FEATURE_STRUCTALIGN
4038
4039     BOOL ContainsPointers() const
4040     {
4041         return GetMethodTable()->ContainsPointers();
4042     }
4043
4044 #ifdef COLLECTIBLE_CLASS
4045     BOOL Collectible() const
4046     {
4047         return GetMethodTable()->Collectible();
4048     }
4049
4050     FORCEINLINE BOOL ContainsPointersOrCollectible() const
4051     {
4052         MethodTable *pMethodTable = GetMethodTable();
4053         return (pMethodTable->ContainsPointers() || pMethodTable->Collectible());
4054     }
4055 #endif //COLLECTIBLE_CLASS
4056
4057     Object* GetObjectBase() const
4058     {
4059         return (Object*) this;
4060     }
4061 };
4062
4063 #define header(i) ((CObjectHeader*)(i))
4064
4065 #define free_list_slot(x) ((uint8_t**)(x))[2]
4066 #define free_list_undo(x) ((uint8_t**)(x))[-1]
4067 #define UNDO_EMPTY ((uint8_t*)1)
4068
4069 #ifdef SHORT_PLUGS
4070 inline 
4071 void set_plug_padded (uint8_t* node)
4072 {
4073     header(node)->SetMarked();
4074 }
4075 inline
4076 void clear_plug_padded (uint8_t* node)
4077 {
4078     header(node)->ClearMarked();
4079 }
4080 inline
4081 BOOL is_plug_padded (uint8_t* node)
4082 {
4083     return header(node)->IsMarked();
4084 }
4085 #else //SHORT_PLUGS
4086 inline void set_plug_padded (uint8_t* node){}
4087 inline void clear_plug_padded (uint8_t* node){}
4088 inline
4089 BOOL is_plug_padded (uint8_t* node){return FALSE;}
4090 #endif //SHORT_PLUGS
4091
4092
4093 inline size_t unused_array_size(uint8_t * p)
4094 {
4095     assert(((CObjectHeader*)p)->IsFree());
4096
4097     size_t* numComponentsPtr = (size_t*)(p + ArrayBase::GetOffsetOfNumComponents());
4098     return free_object_base_size + *numComponentsPtr;
4099 }
4100
4101 heap_segment* heap_segment_rw (heap_segment* ns)
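// Returns ns itself if it is writable, otherwise the first writable (non read-only) segment
// following it, or 0 if there is none.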
4102 {
4103     if ((ns == 0) || !heap_segment_read_only_p (ns))
4104     {
4105         return ns;
4106     }
4107     else
4108     {
4109         do
4110         {
4111             ns = heap_segment_next (ns);
4112         } while ((ns != 0) && heap_segment_read_only_p (ns));
4113         return ns;
4114     }
4115 }
4116
4117 //returns the next non ro segment.
4118 heap_segment* heap_segment_next_rw (heap_segment* seg)
4119 {
4120     heap_segment* ns = heap_segment_next (seg);
4121     return heap_segment_rw (ns);
4122 }
4123
4124 // returns the non-ro segment before seg.
4125 heap_segment* heap_segment_prev_rw (heap_segment* begin, heap_segment* seg)
4126 {
4127     assert (begin != 0);
4128     heap_segment* prev = begin;
4129     heap_segment* current = heap_segment_next_rw (begin);
4130
4131     while (current && current != seg)
4132     {
4133         prev = current;
4134         current = heap_segment_next_rw (current);
4135     }
4136
4137     if (current == seg)
4138     {
4139         return prev;
4140     }
4141     else
4142     {
4143         return 0;
4144     }
4145 }
4146
4147 // returns the segment before seg.
4148 heap_segment* heap_segment_prev (heap_segment* begin, heap_segment* seg)
4149 {
4150     assert (begin != 0);
4151     heap_segment* prev = begin;
4152     heap_segment* current = heap_segment_next (begin);
4153
4154     while (current && current != seg)
4155     {
4156         prev = current;
4157         current = heap_segment_next (current);
4158     }
4159
4160     if (current == seg)
4161     {
4162         return prev;
4163     }
4164     else
4165     {
4166         return 0;
4167     }
4168 }
4169
4170 heap_segment* heap_segment_in_range (heap_segment* ns)
4171 {
4172     if ((ns == 0) || heap_segment_in_range_p (ns))
4173     {
4174         return ns;
4175     }
4176     else
4177     {
4178         do
4179         {
4180             ns = heap_segment_next (ns);
4181         } while ((ns != 0) && !heap_segment_in_range_p (ns));
4182         return ns;
4183     }
4184 }
4185
4186 heap_segment* heap_segment_next_in_range (heap_segment* seg)
4187 {
4188     heap_segment* ns = heap_segment_next (seg);
4189     return heap_segment_in_range (ns);
4190 }
4191
4192 typedef struct
4193 {
4194     uint8_t* memory_base;
4195 } imemory_data;
4196
4197 typedef struct
4198 {
4199     imemory_data *initial_memory;
4200     imemory_data *initial_normal_heap; // points into initial_memory_array
4201     imemory_data *initial_large_heap;  // points into initial_memory_array
4202
4203     size_t block_size_normal;
4204     size_t block_size_large;
4205
4206     size_t block_count;                // # of blocks in each
4207     size_t current_block_normal;
4208     size_t current_block_large;
4209
4210     enum 
4211     { 
4212         ALLATONCE = 1, 
4213         TWO_STAGE, 
4214         EACH_BLOCK 
4215     };
4216
4217     size_t allocation_pattern;
4218 } initial_memory_details;
4219
4220 initial_memory_details memory_details;
4221
4222 BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps)
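// Reserves the initial normal (SOH) and large (LOH) blocks for every heap. Three patterns are
// tried in order: a single reservation covering everything (ALLATONCE), one reservation per
// block type (TWO_STAGE), and finally one reservation per block (EACH_BLOCK).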
4223 {
4224     BOOL reserve_success = FALSE;
4225
4226     // should only be called once
4227     assert (memory_details.initial_memory == 0);
4228
4229     memory_details.initial_memory = new (nothrow) imemory_data[num_heaps*2];
4230     if (memory_details.initial_memory == 0)
4231     {
4232         dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps*2*sizeof(imemory_data)));
4233         return FALSE;
4234     }
4235
4236     memory_details.initial_normal_heap = memory_details.initial_memory;
4237     memory_details.initial_large_heap = memory_details.initial_memory + num_heaps;
4238     memory_details.block_size_normal = normal_size;
4239     memory_details.block_size_large = large_size;
4240     memory_details.block_count = num_heaps;
4241
4242     memory_details.current_block_normal = 0;
4243     memory_details.current_block_large = 0;
4244
4245     g_gc_lowest_address = MAX_PTR;
4246     g_gc_highest_address = 0;
4247
4248     if (((size_t)MAX_PTR - large_size) < normal_size)
4249     {
4250         // we are already overflowing with just one heap.
4251         dprintf (2, ("0x%Ix + 0x%Ix already overflow", normal_size, large_size));
4252         return FALSE;
4253     }
4254
4255     if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size))
4256     {
4257         dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count));
4258         return FALSE;
4259     }
4260
4261     size_t requestedMemory = memory_details.block_count * (normal_size + large_size);
4262
4263     uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
4264     if (allatonce_block)
4265     {
4266         g_gc_lowest_address =  allatonce_block;
4267         g_gc_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
4268         memory_details.allocation_pattern = initial_memory_details::ALLATONCE;
4269
4270         for(size_t i = 0; i < memory_details.block_count; i++)
4271         {
4272             memory_details.initial_normal_heap[i].memory_base = allatonce_block + (i*normal_size);
4273             memory_details.initial_large_heap[i].memory_base = allatonce_block +
4274                             (memory_details.block_count*normal_size) + (i*large_size);
4275             reserve_success = TRUE;
4276         }
4277     }
4278     else
4279     {
4280         // try to allocate 2 blocks
4281         uint8_t* b1 = 0;
4282         uint8_t* b2 = 0;
4283         b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size);
4284         if (b1)
4285         {
4286             b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size);
4287             if (b2)
4288             {
4289                 memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
4290                 g_gc_lowest_address = min(b1,b2);
4291                 g_gc_highest_address = max(b1 + memory_details.block_count*normal_size,
4292                                         b2 + memory_details.block_count*large_size);
4293                 for(size_t i = 0; i < memory_details.block_count; i++)
4294                 {
4295                     memory_details.initial_normal_heap[i].memory_base = b1 + (i*normal_size);
4296                     memory_details.initial_large_heap[i].memory_base = b2 + (i*large_size);
4297                     reserve_success = TRUE;
4298                 }
4299             }
4300             else
4301             {
4302                 // b2 allocation failed, we'll go on to try allocating each block.
4303                 // We could preserve the b1 alloc, but code complexity increases
4304                 virtual_free (b1, memory_details.block_count * normal_size);
4305             }
4306         }
4307
4308         if ((b2==NULL) && ( memory_details.block_count > 1))
4309         {
4310             memory_details.allocation_pattern = initial_memory_details::EACH_BLOCK;
4311
4312             imemory_data *current_block = memory_details.initial_memory;
4313             for(size_t i = 0; i < (memory_details.block_count*2); i++, current_block++)
4314             {
4315                 size_t block_size = ((i < memory_details.block_count) ?
4316                                      memory_details.block_size_normal :
4317                                      memory_details.block_size_large);
4318                 current_block->memory_base =
4319                     (uint8_t*)virtual_alloc (block_size);
4320                 if (current_block->memory_base == 0)
4321                 {
4322                     // Free the blocks that we've allocated so far
4323                     current_block = memory_details.initial_memory;
4324                     for(size_t j = 0; j < i; j++, current_block++){
4325                         if (current_block->memory_base != 0){
4326                             block_size = ((j < memory_details.block_count) ?
4327                                      memory_details.block_size_normal :
4328                                      memory_details.block_size_large);
4329                              virtual_free (current_block->memory_base , block_size);
4330                         }
4331                     }
4332                     reserve_success = FALSE;
4333                     break;
4334                 }
4335                 else
4336                 {
4337                     if (current_block->memory_base < g_gc_lowest_address)
4338                         g_gc_lowest_address =  current_block->memory_base;
4339                     if (((uint8_t *) current_block->memory_base + block_size) > g_gc_highest_address)
4340                         g_gc_highest_address = (current_block->memory_base + block_size);
4341                 }
4342                 reserve_success = TRUE;
4343             }
4344         }
4345     }
4346
4347     return reserve_success;
4348 }
4349
4350 void destroy_initial_memory()
4351 {
4352     if (memory_details.initial_memory != NULL)
4353     {
4354         if (memory_details.allocation_pattern == initial_memory_details::ALLATONCE)
4355         {
4356             virtual_free(memory_details.initial_memory[0].memory_base,
4357                 memory_details.block_count*(memory_details.block_size_normal +
4358                 memory_details.block_size_large));
4359         }
4360         else if (memory_details.allocation_pattern == initial_memory_details::TWO_STAGE)
4361         {
4362             virtual_free (memory_details.initial_normal_heap[0].memory_base,
4363                 memory_details.block_count*memory_details.block_size_normal);
4364
4365             virtual_free (memory_details.initial_large_heap[0].memory_base,
4366                 memory_details.block_count*memory_details.block_size_large);
4367         }
4368         else
4369         {
4370             assert (memory_details.allocation_pattern == initial_memory_details::EACH_BLOCK);
4371             imemory_data *current_block = memory_details.initial_memory;
4372             for(size_t i = 0; i < (memory_details.block_count*2); i++, current_block++)
4373             {
4374                 size_t block_size = (i < memory_details.block_count) ? memory_details.block_size_normal :
4375                                                                        memory_details.block_size_large;
4376                 if (current_block->memory_base != NULL)
4377                 {
4378                     virtual_free (current_block->memory_base, block_size);
4379                 }
4380             }
4381         }
4382
4383         delete [] memory_details.initial_memory;
4384         memory_details.initial_memory = NULL;
4385         memory_details.initial_normal_heap = NULL;
4386         memory_details.initial_large_heap = NULL;
4387     }
4388 }
4389
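// Hands out the next reserved block of the requested size; when the normal and large block
// sizes are equal, normal requests spill over into the large pool once the normal pool is used up.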
4390 void* next_initial_memory (size_t size)
4391 {
4392     assert ((size == memory_details.block_size_normal) || (size == memory_details.block_size_large));
4393     void *res = NULL;
4394
4395     if ((size != memory_details.block_size_normal) ||
4396         ((memory_details.current_block_normal == memory_details.block_count) &&
4397          (memory_details.block_size_normal == memory_details.block_size_large)))
4398     {
4399         // If the block sizes are the same, flow block requests from normal to large
4400         assert (memory_details.current_block_large < memory_details.block_count);
4401         assert (memory_details.initial_large_heap != 0);
4402
4403         res = memory_details.initial_large_heap[memory_details.current_block_large].memory_base;
4404         memory_details.current_block_large++;
4405     }
4406     else
4407     {
4408         assert (memory_details.current_block_normal < memory_details.block_count);
4409         assert (memory_details.initial_normal_heap != NULL);
4410
4411         res = memory_details.initial_normal_heap[memory_details.current_block_normal].memory_base;
4412         memory_details.current_block_normal++;
4413     }
4414
4415     return res;
4416 }
4417
4418 heap_segment* get_initial_segment (size_t size, int h_number)
4419 {
4420     void* mem = next_initial_memory (size);
4421     heap_segment* res = gc_heap::make_heap_segment ((uint8_t*)mem, size , h_number);
4422
4423     return res;
4424 }
4425
4426 void* virtual_alloc (size_t size)
4427 {
4428     size_t requested_size = size;
4429
4430     if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
4431     {
4432         gc_heap::reserved_memory_limit =
4433             GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
4434         if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
4435         {
4436             return 0;
4437         }
4438     }
4439
4440     uint32_t flags = VirtualReserveFlags::None;
4441 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
4442     if (virtual_alloc_hardware_write_watch)
4443     {
4444         flags = VirtualReserveFlags::WriteWatch;
4445     }
4446 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
4447     void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
4448     void *aligned_mem = prgmem;
4449
4450     // We don't want (prgmem + size) to be right at the end of the address space 
4451     // because we'd have to worry about that every time we do (address + size).
4452     // We also want to make sure that we leave LARGE_OBJECT_SIZE at the end 
4453     // so that when we allocate a small object we don't need to worry about overflow
4454     // there when we do alloc_ptr+size.
4455     if (prgmem)
4456     {
4457         uint8_t* end_mem = (uint8_t*)prgmem + requested_size;
4458
4459         if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC))
4460         {
4461             GCToOSInterface::VirtualRelease (prgmem, requested_size);
4462             dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding",
4463                         requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
4464             prgmem = 0;
4465             aligned_mem = 0;
4466         }
4467     }
4468
4469     if (prgmem)
4470     {
4471         gc_heap::reserved_memory += requested_size;
4472     }
4473
4474     dprintf (2, ("Virtual Alloc size %Id: [%Ix, %Ix[",
4475                  requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
4476
4477     return aligned_mem;
4478 }
4479
4480 void virtual_free (void* add, size_t size)
4481 {
4482     GCToOSInterface::VirtualRelease (add, size);
4483     gc_heap::reserved_memory -= size;
4484     dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[",
4485                  size, (size_t)add, (size_t)((uint8_t*)add+size)));
4486 }
4487
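// Picks the segment size to use: the configured GCConfig::GetSegmentSize value (halved for the
// LOH), or the built-in default when the configured value is invalid. With multiple heaps the
// default shrinks on machines with more than 4 and again more than 8 processors, and with the
// segment mapping table the result is rounded to a power of two.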
4488 static size_t get_valid_segment_size (BOOL large_seg=FALSE)
4489 {
4490     size_t seg_size, initial_seg_size;
4491
4492     if (!large_seg)
4493     {
4494         initial_seg_size = INITIAL_ALLOC;
4495         seg_size = static_cast<size_t>(GCConfig::GetSegmentSize());
4496     }
4497     else
4498     {
4499         initial_seg_size = LHEAP_ALLOC;
4500         seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2;
4501     }
4502
4503 #ifdef MULTIPLE_HEAPS
4504 #ifdef BIT64
4505     if (!large_seg)
4506 #endif // BIT64
4507     {
4508         if (g_num_processors > 4)
4509             initial_seg_size /= 2;
4510         if (g_num_processors > 8)
4511             initial_seg_size /= 2;
4512     }
4513 #endif //MULTIPLE_HEAPS
4514
4515     // if seg_size is small but not 0 (0 is default if config not set)
4516     // then set the segment to the minimum size
4517     if (!g_theGCHeap->IsValidSegmentSize(seg_size))
4518     {
4519         // if requested size is between 1 byte and 4MB, use min
4520         if ((seg_size >> 1) && !(seg_size >> 22))
4521             seg_size = 1024*1024*4;
4522         else
4523             seg_size = initial_seg_size;
4524     }
4525
4526 #ifdef SEG_MAPPING_TABLE
4527 #ifdef BIT64
4528     seg_size = round_up_power2 (seg_size);
4529 #else
4530     seg_size = round_down_power2 (seg_size);
4531 #endif // BIT64
4532 #endif //SEG_MAPPING_TABLE
4533
4534     return (seg_size);
4535 }
4536
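// Estimates how much space the surviving ephemeral generations will need after this GC
// (non-pinned survivors plus alignment and padding overhead plus the generation start sizes);
// soh_get_segment_to_expand uses it to decide whether an existing gen2 segment can be reused.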
4537 void
4538 gc_heap::compute_new_ephemeral_size()
4539 {
4540     int eph_gen_max = max_generation - 1 - (settings.promotion ? 1 : 0);
4541     size_t padding_size = 0;
4542
4543     for (int i = 0; i <= eph_gen_max; i++)
4544     {
4545         dynamic_data* dd = dynamic_data_of (i);
4546         total_ephemeral_size += (dd_survived_size (dd) - dd_pinned_survived_size (dd));
4547 #ifdef RESPECT_LARGE_ALIGNMENT
4548         total_ephemeral_size += dd_num_npinned_plugs (dd) * switch_alignment_size (FALSE);
4549 #endif //RESPECT_LARGE_ALIGNMENT
4550 #ifdef FEATURE_STRUCTALIGN
4551         total_ephemeral_size += dd_num_npinned_plugs (dd) * MAX_STRUCTALIGN;
4552 #endif //FEATURE_STRUCTALIGN
4553
4554 #ifdef SHORT_PLUGS
4555         padding_size += dd_padding_size (dd);
4556 #endif //SHORT_PLUGS
4557     }
4558
4559     total_ephemeral_size += eph_gen_starts_size;
4560
4561 #ifdef RESPECT_LARGE_ALIGNMENT
4562     size_t planned_ephemeral_size = heap_segment_plan_allocated (ephemeral_heap_segment) -
4563                                        generation_plan_allocation_start (generation_of (max_generation-1));
4564     total_ephemeral_size = min (total_ephemeral_size, planned_ephemeral_size);
4565 #endif //RESPECT_LARGE_ALIGNMENT
4566
4567 #ifdef SHORT_PLUGS
4568     total_ephemeral_size = Align ((size_t)((double)total_ephemeral_size * short_plugs_pad_ratio) + 1);
4569     total_ephemeral_size += Align (DESIRED_PLUG_LENGTH);
4570 #endif //SHORT_PLUGS
4571
4572     dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)", 
4573         total_ephemeral_size,
4574         padding_size, (total_ephemeral_size - padding_size)));
4575 }
4576
4577 #ifdef _MSC_VER
4578 #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
4579 #endif // _MSC_VER
4580
4581 heap_segment*
4582 gc_heap::soh_get_segment_to_expand()
4583 {
4584     size_t size = soh_segment_size;
4585
4586     ordered_plug_indices_init = FALSE;
4587     use_bestfit = FALSE;
4588
4589     //compute the size of the new ephemeral heap segment.
4590     compute_new_ephemeral_size();
4591
4592     if ((settings.pause_mode != pause_low_latency) &&
4593         (settings.pause_mode != pause_no_gc)
4594 #ifdef BACKGROUND_GC
4595         && (!recursive_gc_sync::background_running_p())
4596 #endif //BACKGROUND_GC
4597         )
4598     {
4599         allocator*  gen_alloc = ((settings.condemned_generation == max_generation) ? 0 :
4600                               generation_allocator (generation_of (max_generation)));
4601         dprintf (2, ("(gen%d)soh_get_segment_to_expand", settings.condemned_generation));
4602
4603         // try to find one in the gen 2 segment list, search backwards because the first segments
4604         // tend to be more compact than the later ones.
4605         heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
4606
4607         PREFIX_ASSUME(fseg != NULL);
4608
4609 #ifdef SEG_REUSE_STATS
4610         int try_reuse = 0;
4611 #endif //SEG_REUSE_STATS
4612
4613         heap_segment* seg = ephemeral_heap_segment;
4614         while ((seg = heap_segment_prev_rw (fseg, seg)) && (seg != fseg))
4615         {
4616 #ifdef SEG_REUSE_STATS
4617             try_reuse++;
4618 #endif //SEG_REUSE_STATS
4619
4620             if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc))
4621             {
4622                 get_gc_data_per_heap()->set_mechanism (gc_heap_expand, 
4623                     (use_bestfit ? expand_reuse_bestfit : expand_reuse_normal));
4624                 if (settings.condemned_generation == max_generation)
4625                 {
4626                     if (use_bestfit)
4627                     {
4628                         build_ordered_free_spaces (seg);
4629                         dprintf (GTC_LOG, ("can use best fit"));
4630                     }
4631
4632 #ifdef SEG_REUSE_STATS
4633                     dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse", 
4634                         settings.condemned_generation, try_reuse));
4635 #endif //SEG_REUSE_STATS
4636                     dprintf (GTC_LOG, ("max_gen: Found existing segment to expand into %Ix", (size_t)seg));
4637                     return seg;
4638                 }
4639                 else
4640                 {
4641 #ifdef SEG_REUSE_STATS
4642                     dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse - returning", 
4643                         settings.condemned_generation, try_reuse));
4644 #endif //SEG_REUSE_STATS
4645                     dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg));
4646
4647                     // If we return 0 here, the allocator will think that since we are short on end
4648                     // of seg we need to trigger a full compacting GC. So if sustained low latency 
4649                     // is set we should acquire a new seg instead; that way we wouldn't be short.
4650                     // The real solution, of course, is to actually implement seg reuse in gen1.
4651                     if (settings.pause_mode != pause_sustained_low_latency)
4652                     {
4653                         dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is set, acquire a new seg"));
4654                         get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc);
4655                         return 0;
4656                     }
4657                 }
4658             }
4659         }
4660     }
4661
4662     heap_segment* result = get_segment (size, FALSE);
4663
4664     if(result)
4665     {
4666 #ifdef BACKGROUND_GC
4667         if (current_c_gc_state == c_gc_state_planning)
4668         {
4669             // When we expand heap during bgc sweep, we set the seg to be swept so 
4670             // we'll always look at cards for objects on the new segment.
4671             result->flags |= heap_segment_flags_swept;
4672         }
4673 #endif //BACKGROUND_GC
4674
4675         FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(result),
4676                                   (size_t)(heap_segment_reserved (result) - heap_segment_mem(result)),
4677                                   gc_etw_segment_small_object_heap);
4678     }
4679
4680     get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? expand_new_seg : expand_no_memory));
4681
4682     if (result == 0)
4683     {
4684         dprintf (2, ("h%d: failed to allocate a new segment!", heap_number));
4685     }
4686     else
4687     {
4688 #ifdef MULTIPLE_HEAPS
4689         heap_segment_heap (result) = this;
4690 #endif //MULTIPLE_HEAPS
4691     }
4692
4693     dprintf (GTC_LOG, ("(gen%d)creating new segment %Ix", settings.condemned_generation, result));
4694     return result;
4695 }
4696
4697 #ifdef _MSC_VER
4698 #pragma warning(default:4706)
4699 #endif // _MSC_VER
4700
4701 //returns 0 in case of allocation failure
4702 heap_segment*
4703 gc_heap::get_segment (size_t size, BOOL loh_p)
4704 {
4705     heap_segment* result = 0;
4706
4707     if (segment_standby_list != 0)
4708     {
4709         result = segment_standby_list;
4710         heap_segment* last = 0;
4711         while (result)
4712         {
4713             size_t hs = (size_t)(heap_segment_reserved (result) - (uint8_t*)result);
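            // take a hoarded segment only if it is at least as big as the request but
            // less than twice its size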
4714             if ((hs >= size) && ((hs / 2) < size))
4715             {
4716                 dprintf (2, ("Hoarded segment %Ix found", (size_t) result));
4717                 if (last)
4718                 {
4719                     heap_segment_next (last) = heap_segment_next (result);
4720                 }
4721                 else
4722                 {
4723                     segment_standby_list = heap_segment_next (result);
4724                 }
4725                 break;
4726             }
4727             else
4728             {
4729                 last = result;
4730                 result = heap_segment_next (result);
4731             }
4732         }
4733     }
4734
4735     if (result)
4736     {
4737         init_heap_segment (result);
4738 #ifdef BACKGROUND_GC
4739         if (should_commit_mark_array())
4740         {
4741             dprintf (GC_TABLE_LOG, ("hoarded seg %Ix, mark_array is %Ix", result, mark_array));
4742             if (!commit_mark_array_new_seg (__this, result))
4743             {
4744                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for hoarded seg"));
4745                 // If we can't use it we need to thread it back.
4746                 if (segment_standby_list != 0)
4747                 {
4748                     heap_segment_next (result) = segment_standby_list;
4749                     segment_standby_list = result;
4750                 }
4751                 else
4752                 {
4753                     segment_standby_list = result;
4754                 }
4755
4756                 result = 0;
4757             }
4758         }
4759 #endif //BACKGROUND_GC
4760
4761 #ifdef SEG_MAPPING_TABLE
4762         if (result)
4763             seg_mapping_table_add_segment (result, __this);
4764 #endif //SEG_MAPPING_TABLE
4765     }
4766
4767     if (!result)
4768     {
4769 #ifndef SEG_MAPPING_TABLE
4770         if (!seg_table->ensure_space_for_insert ())
4771             return 0;
4772 #endif //SEG_MAPPING_TABLE
4773         void* mem = virtual_alloc (size);
4774         if (!mem)
4775         {
4776             fgm_result.set_fgm (fgm_reserve_segment, size, loh_p);
4777             return 0;
4778         }
4779
4780         result = gc_heap::make_heap_segment ((uint8_t*)mem, size, heap_number);
4781
4782         if (result)
4783         {
4784             uint8_t* start;
4785             uint8_t* end;
4786             if (mem < g_gc_lowest_address)
4787             {
4788                 start =  (uint8_t*)mem;
4789             }
4790             else
4791             {
4792                 start = (uint8_t*)g_gc_lowest_address;
4793             }
4794
4795             if (((uint8_t*)mem + size) > g_gc_highest_address)
4796             {
4797                 end = (uint8_t*)mem + size;
4798             }
4799             else
4800             {
4801                 end = (uint8_t*)g_gc_highest_address;
4802             }
4803
4804             if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0)
4805             {
4806                 virtual_free (mem, size);
4807                 return 0;
4808             }
4809         }
4810         else
4811         {
4812             fgm_result.set_fgm (fgm_commit_segment_beg, SEGMENT_INITIAL_COMMIT, loh_p);
4813             virtual_free (mem, size);
4814         }
4815
4816         if (result)
4817         {
4818 #ifdef SEG_MAPPING_TABLE
4819             seg_mapping_table_add_segment (result, __this);
4820 #else //SEG_MAPPING_TABLE
4821             gc_heap::seg_table->insert ((uint8_t*)result, delta);
4822 #endif //SEG_MAPPING_TABLE
4823         }
4824     }
4825
4826 #ifdef BACKGROUND_GC
4827     if (result)
4828     {
4829         ::record_changed_seg ((uint8_t*)result, heap_segment_reserved (result), 
4830                             settings.gc_index, current_bgc_state,
4831                             seg_added);
4832         bgc_verify_mark_array_cleared (result);
4833     }
4834 #endif //BACKGROUND_GC
4835
4836     dprintf (GC_TABLE_LOG, ("h%d: new seg: %Ix-%Ix (%Id)", heap_number, result, ((uint8_t*)result + size), size));
4837     return result;
4838 }
4839
4840 void release_segment (heap_segment* sg)
4841 {
4842     ptrdiff_t delta = 0;
4843     FIRE_EVENT(GCFreeSegment_V1, heap_segment_mem(sg));
4844     virtual_free (sg, (uint8_t*)heap_segment_reserved (sg)-(uint8_t*)sg);
4845 }
4846
4847 heap_segment* gc_heap::get_segment_for_loh (size_t size
4848 #ifdef MULTIPLE_HEAPS
4849                                            , gc_heap* hp
4850 #endif //MULTIPLE_HEAPS
4851                                            )
4852 {
4853 #ifndef MULTIPLE_HEAPS
4854     gc_heap* hp = 0;
4855 #endif //MULTIPLE_HEAPS
4856     heap_segment* res = hp->get_segment (size, TRUE);
4857     if (res != 0)
4858     {
4859 #ifdef MULTIPLE_HEAPS
4860         heap_segment_heap (res) = hp;
4861 #endif //MULTIPLE_HEAPS
4862         res->flags |= heap_segment_flags_loh;
4863
4864         FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), gc_etw_segment_large_object_heap);
4865
4866         GCToEEInterface::DiagUpdateGenerationBounds();
4867
4868 #ifdef MULTIPLE_HEAPS
4869         hp->thread_loh_segment (res);
4870 #else
4871         thread_loh_segment (res);
4872 #endif //MULTIPLE_HEAPS
4873     }
4874
4875     return res;
4876 }
4877
4878 void gc_heap::thread_loh_segment (heap_segment* new_seg)
4879 {
4880     heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
4881
4882     while (heap_segment_next_rw (seg))
4883         seg = heap_segment_next_rw (seg);
4884     heap_segment_next (seg) = new_seg;
4885 }
4886
4887 heap_segment*
4888 gc_heap::get_large_segment (size_t size, BOOL* did_full_compact_gc)
4889 {
4890     *did_full_compact_gc = FALSE;
4891     size_t last_full_compact_gc_count = get_full_compact_gc_count();
4892
4893     //access to get_segment needs to be serialized
4894     add_saved_spinlock_info (me_release, mt_get_large_seg);
4895
4896     dprintf (SPINLOCK_LOG, ("[%d]Seg: Lmsl", heap_number));
4897     leave_spin_lock (&more_space_lock);
4898     enter_spin_lock (&gc_heap::gc_lock);
4899     dprintf (SPINLOCK_LOG, ("[%d]Seg: Egc", heap_number));
4900     // if a GC happened after we released more_space_lock and before we ask for a
4901     // segment below, we need to count that GC.
4902     size_t current_full_compact_gc_count = get_full_compact_gc_count();
4903
4904     if (current_full_compact_gc_count > last_full_compact_gc_count)
4905     {
4906         *did_full_compact_gc = TRUE;
4907     }
4908
4909 #ifdef BACKGROUND_GC
4910     while (current_c_gc_state == c_gc_state_planning)
4911     {
4912         dprintf (3, ("lh state planning, waiting to get a large seg"));
4913
4914         dprintf (SPINLOCK_LOG, ("[%d]Seg: P, Lgc", heap_number));
4915         leave_spin_lock (&gc_lock);
4916         background_gc_wait_lh (awr_get_loh_seg);
4917         enter_spin_lock (&gc_lock);
4918         dprintf (SPINLOCK_LOG, ("[%d]Seg: P, Egc", heap_number));
4919     }
4920     assert ((current_c_gc_state == c_gc_state_free) ||
4921             (current_c_gc_state == c_gc_state_marking));
4922 #endif //BACKGROUND_GC
4923
4924     heap_segment* res = get_segment_for_loh (size
4925 #ifdef MULTIPLE_HEAPS
4926                                             , this
4927 #endif //MULTIPLE_HEAPS
4928                                             );
4929
4930     dprintf (SPINLOCK_LOG, ("[%d]Seg: A Lgc", heap_number));
4931     leave_spin_lock (&gc_heap::gc_lock);
4932     enter_spin_lock (&more_space_lock);
4933     dprintf (SPINLOCK_LOG, ("[%d]Seg: A Emsl", heap_number));
4934     add_saved_spinlock_info (me_acquire, mt_get_large_seg);
4935     
4936 #ifdef BACKGROUND_GC
4937     wait_for_background_planning (awr_get_loh_seg);
4938 #endif //BACKGROUND_GC
4939
4940     return res;
4941 }
4942
4943 #if 0
4944 BOOL gc_heap::unprotect_segment (heap_segment* seg)
4945 {
4946     uint8_t* start = align_lower_page (heap_segment_mem (seg));
4947     ptrdiff_t region_size = heap_segment_allocated (seg) - start;
4948
4949     if (region_size != 0 )
4950     {
4951         dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
4952
4953         BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
4954         assert (status);
4955         return status;
4956     }
4957     return FALSE;
4958 }
4959 #endif
4960
4961 #ifdef MULTIPLE_HEAPS
4962 #ifdef _X86_
4963 #ifdef _MSC_VER
4964 #pragma warning(disable:4035)
4965     static ptrdiff_t  get_cycle_count()
4966     {
4967         __asm   rdtsc
4968     }
4969 #pragma warning(default:4035)
4970 #elif defined(__GNUC__)
4971     static ptrdiff_t  get_cycle_count()
4972     {
4973         ptrdiff_t cycles;
4974         ptrdiff_t cyclesHi;
4975         __asm__ __volatile__
4976         ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
4977         return cycles;
4978     }
4979 #else //_MSC_VER
4980 #error Unknown compiler
4981 #endif //_MSC_VER
4982 #elif defined(_TARGET_AMD64_) 
4983 #ifdef _MSC_VER
4984 extern "C" uint64_t __rdtsc();
4985 #pragma intrinsic(__rdtsc)
4986     static ptrdiff_t get_cycle_count()
4987     {
4988         return (ptrdiff_t)__rdtsc();
4989     }
4990 #elif defined(__clang__)    
4991     static ptrdiff_t get_cycle_count()
4992     {
4993         ptrdiff_t cycles;
4994         ptrdiff_t cyclesHi;
4995         __asm__ __volatile__
4996         ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
4997         return (cyclesHi << 32) | cycles;
4998     }
4999 #else // _MSC_VER
5000     extern "C" ptrdiff_t get_cycle_count(void);
5001 #endif // _MSC_VER
5002 #elif defined(_TARGET_ARM_)
5003     static ptrdiff_t get_cycle_count()
5004     {
5005         // @ARMTODO: cycle counter is not exposed to user mode by CoreARM. For now (until we can show this
5006         // makes a difference on the ARM configurations on which we'll run) just return 0. This will result in
5007         // all buffer access times being reported as equal in access_time().
5008         return 0;
5009     }
5010 #elif defined(_TARGET_ARM64_)
5011     static ptrdiff_t get_cycle_count()
5012     {
5013         // @ARM64TODO: cycle counter is not exposed to user mode by CoreARM. For now (until we can show this
5014         // makes a difference on the ARM configurations on which we'll run) just return 0. This will result in
5015         // all buffer access times being reported as equal in access_time().
5016         return 0;
5017     }
5018 #else
5019 #error NYI platform: get_cycle_count
5020 #endif //_TARGET_X86_
5021
5022 class heap_select
5023 {
5024     heap_select() {}
5025     static uint8_t* sniff_buffer;
5026     static unsigned n_sniff_buffers;
5027     static unsigned cur_sniff_index;
5028
5029     static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
5030     static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
5031     static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
5032     static uint16_t heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
5033     static uint16_t heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
5034     static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
5035
5036     static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers)
5037     {
5038         ptrdiff_t start_cycles = get_cycle_count();
5039         uint8_t sniff = sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE];
5040         assert (sniff == 0);
5041         ptrdiff_t elapsed_cycles = get_cycle_count() - start_cycles;
5042         // add sniff here just to defeat the optimizer
5043         elapsed_cycles += sniff;
5044         return (int) elapsed_cycles;
5045     }
5046
5047 public:
5048     static BOOL init(int n_heaps)
5049     {
5050         assert (sniff_buffer == NULL && n_sniff_buffers == 0);
5051         if (!GCToOSInterface::CanGetCurrentProcessorNumber())
5052         {
5053             n_sniff_buffers = n_heaps*2+1;
5054             size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1;
5055             size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
5056             if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overflow
5057             {
5058                 return FALSE;
5059             }
5060
5061             sniff_buffer = new (nothrow) uint8_t[sniff_buf_size];
5062             if (sniff_buffer == 0)
5063                 return FALSE;
5064             memset(sniff_buffer, 0, sniff_buf_size*sizeof(uint8_t));
5065         }
5066
5067         //cannot enable GC NUMA awareness; force all heaps into
5068         //one NUMA node by filling the array with all 0s
5069         if (!GCToOSInterface::CanEnableGCNumaAware())
5070             memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node)); 
5071
5072         return TRUE;
5073     }
5074
5075     static void init_cpu_mapping(gc_heap * /*heap*/, int heap_number)
5076     {
5077         if (GCToOSInterface::CanGetCurrentProcessorNumber())
5078         {
5079             uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps;
5080             // We can safely cast heap_number to a uint16_t because GetCurrentProcessCpuCount
5081             // only returns up to MAX_SUPPORTED_CPUS procs right now. We only ever create at most
5082             // MAX_SUPPORTED_CPUS GC threads.
5083             proc_no_to_heap_no[proc_no] = (uint16_t)heap_number;
5084         }
5085     }
5086
5087     static void mark_heap(int heap_number)
5088     {
5089         if (GCToOSInterface::CanGetCurrentProcessorNumber())
5090             return;
5091
5092         for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++)
5093             sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
5094     }
5095
5096     static int select_heap(alloc_context* acontext, int /*hint*/)
5097     {
5098         UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf
5099
5100         if (GCToOSInterface::CanGetCurrentProcessorNumber())
5101             return proc_no_to_heap_no[GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps];
5102
5103         unsigned sniff_index = Interlocked::Increment(&cur_sniff_index);
5104         sniff_index %= n_sniff_buffers;
5105
5106         int best_heap = 0;
5107         int best_access_time = 1000*1000*1000;
5108         int second_best_access_time = best_access_time;
5109
5110         uint8_t *l_sniff_buffer = sniff_buffer;
5111         unsigned l_n_sniff_buffers = n_sniff_buffers;
5112         for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++)
5113         {
5114             int this_access_time = access_time(l_sniff_buffer, heap_number, sniff_index, l_n_sniff_buffers);
5115             if (this_access_time < best_access_time)
5116             {
5117                 second_best_access_time = best_access_time;
5118                 best_access_time = this_access_time;
5119                 best_heap = heap_number;
5120             }
5121             else if (this_access_time < second_best_access_time)
5122             {
5123                 second_best_access_time = this_access_time;
5124             }
5125         }
5126
5127         if (best_access_time*2 < second_best_access_time)
5128         {
5129             sniff_buffer[(1 + best_heap*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
5130
5131             dprintf (3, ("select_heap yields crisp %d for context %p\n", best_heap, (void *)acontext));
5132         }
5133         else
5134         {
5135             dprintf (3, ("select_heap yields vague %d for context %p\n", best_heap, (void *)acontext ));
5136         }
5137
5138         return best_heap;
5139     }
5140
5141     static bool can_find_heap_fast()
5142     {
5143         return GCToOSInterface::CanGetCurrentProcessorNumber();
5144     }
5145
5146     static uint16_t find_proc_no_from_heap_no(int heap_number)
5147     {
5148         return heap_no_to_proc_no[heap_number];
5149     }
5150
5151     static void set_proc_no_for_heap(int heap_number, uint16_t proc_no)
5152     {
5153         heap_no_to_proc_no[heap_number] = proc_no;
5154     }
5155
5156     static uint16_t find_numa_node_from_heap_no(int heap_number)
5157     {
5158         return heap_no_to_numa_node[heap_number];
5159     }
5160
5161     static void set_numa_node_for_heap(int heap_number, uint16_t numa_node)
5162     {
5163         heap_no_to_numa_node[heap_number] = numa_node;
5164     }
5165
5166     static uint16_t find_cpu_group_from_heap_no(int heap_number)
5167     {
5168         return heap_no_to_cpu_group[heap_number];
5169     }
5170
5171     static void set_cpu_group_for_heap(int heap_number, uint16_t group_number)
5172     {
5173         heap_no_to_cpu_group[heap_number] = group_number;
5174     }
5175
5176     static uint16_t find_group_proc_from_heap_no(int heap_number)
5177     {
5178         return heap_no_to_group_proc[heap_number];
5179     }
5180
5181     static void set_group_proc_for_heap(int heap_number, uint16_t group_proc)
5182     {
5183         heap_no_to_group_proc[heap_number] = group_proc;
5184     }
5185
5186     static void init_numa_node_to_heap_map(int nheaps)
5187     {   // called right after GCHeap::Init() for each heap has finished.
5188         // When NUMA is not enabled, heap_no_to_numa_node[] is filled entirely
5189         // with 0s during initialization and will be treated as one node.
5190         numa_node_to_heap_map[0] = 0;
5191         int node_index = 1;
5192
5193         for (int i=1; i < nheaps; i++)
5194         {
5195             if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1])
5196                 numa_node_to_heap_map[node_index++] = (uint16_t)i;
5197         }
5198         numa_node_to_heap_map[node_index] = (uint16_t)nheaps; //mark the end with nheaps
5199     }
5200
5201     static void get_heap_range_for_heap(int hn, int* start, int* end)
5202     {   // 1-tier/no NUMA case: heap_no_to_numa_node[] is all zeros
5203         // and treated as one node; thus start=0, end=n_heaps
5204         uint16_t numa_node = heap_no_to_numa_node[hn];
5205         *start = (int)numa_node_to_heap_map[numa_node];
5206         *end   = (int)(numa_node_to_heap_map[numa_node+1]);
5207     }
5208 };
5209 uint8_t* heap_select::sniff_buffer;
5210 unsigned heap_select::n_sniff_buffers;
5211 unsigned heap_select::cur_sniff_index;
5212 uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
5213 uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
5214 uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
5215 uint16_t heap_select::heap_no_to_cpu_group[MAX_SUPPORTED_CPUS];
5216 uint16_t heap_select::heap_no_to_group_proc[MAX_SUPPORTED_CPUS];
5217 uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
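// A hedged sketch of the fallback heuristic above (not an upstream description;
// HS_CACHE_LINE_SIZE is simply the padding constant already used by access_time).
// Each (heap_number, sniff_index) pair owns one cache line in sniff_buffer at
//
//     (1 + heap_number * n_sniff_buffers + sniff_index) * HS_CACHE_LINE_SIZE
//
// select_heap touches that line for every heap and times the read with
// get_cycle_count(); the line most recently written by mark_heap from this
// processor tends to still be in cache and reads fastest, steering the thread
// back to the heap it last used. For example, with n_heaps = 4 the init above
// picks n_sniff_buffers = 9, so heap 2 with sniff_index 5 uses the line at byte
// offset (1 + 2*9 + 5) * HS_CACHE_LINE_SIZE.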
5218
5219 BOOL gc_heap::create_thread_support (unsigned number_of_heaps)
5220 {
5221     BOOL ret = FALSE;
5222     if (!gc_start_event.CreateOSManualEventNoThrow (FALSE))
5223     {
5224         goto cleanup;
5225     }
5226     if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE))
5227     {
5228         goto cleanup;
5229     }
5230     if (!gc_t_join.init (number_of_heaps, join_flavor_server_gc))
5231     {
5232         goto cleanup;
5233     }
5234
5235     ret = TRUE;
5236
5237 cleanup:
5238
5239     if (!ret)
5240     {
5241         destroy_thread_support();
5242     }
5243
5244     return ret;
5245 }
5246
5247 void gc_heap::destroy_thread_support ()
5248 {
5249     if (ee_suspend_event.IsValid())
5250     {
5251         ee_suspend_event.CloseEvent();
5252     }
5253     if (gc_start_event.IsValid())
5254     {
5255         gc_start_event.CloseEvent();
5256     }
5257 }
5258
5259 void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affinity)
5260 {
5261     affinity->Group = GCThreadAffinity::None;
5262     affinity->Processor = GCThreadAffinity::None;
5263
5264     uint16_t gn, gpn;
5265     GCToOSInterface::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
5266
5267     int bit_number = 0;
5268     for (uintptr_t mask = 1; mask !=0; mask <<=1) 
5269     {
5270         if (bit_number == gpn)
5271         {
5272             dprintf(3, ("using processor group %d, mask %Ix for heap %d\n", gn, mask, heap_number));
5273             affinity->Processor = gpn;
5274             affinity->Group = gn;
5275             heap_select::set_cpu_group_for_heap(heap_number, gn);
5276             heap_select::set_group_proc_for_heap(heap_number, gpn);
5277             if (GCToOSInterface::CanEnableGCNumaAware())
5278             {  
5279                 PROCESSOR_NUMBER proc_no;
5280                 proc_no.Group    = gn;
5281                 proc_no.Number   = (uint8_t)gpn;
5282                 proc_no.Reserved = 0;
5283
5284                 uint16_t node_no = 0;
5285                 if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
5286                     heap_select::set_numa_node_for_heap(heap_number, node_no);
5287             }
5288             else
5289             {   // no numa setting, each cpu group is treated as a node
5290                 heap_select::set_numa_node_for_heap(heap_number, gn);
5291             }
5292             return;
5293         }
5294         bit_number++;
5295     }
5296 }
5297
5298 void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affinity)
5299 {
5300     affinity->Group = GCThreadAffinity::None;
5301     affinity->Processor = GCThreadAffinity::None;
5302
5303     uintptr_t pmask, smask;
5304     if (GCToOSInterface::GetCurrentProcessAffinityMask(&pmask, &smask))
5305     {
5306         pmask &= smask;
5307         int bit_number = 0; 
5308         uint8_t proc_number = 0;
5309         for (uintptr_t mask = 1; mask != 0; mask <<= 1)
5310         {
5311             if ((mask & pmask) != 0)
5312             {
5313                 if (bit_number == heap_number)
5314                 {
5315                     dprintf (3, ("Using processor %d for heap %d", proc_number, heap_number));
5316                     affinity->Processor = proc_number;
5317                     heap_select::set_proc_no_for_heap(heap_number, proc_number);
5318                     if (GCToOSInterface::CanEnableGCNumaAware())
5319                     {
5320                         uint16_t node_no = 0;
5321                         PROCESSOR_NUMBER proc_no;
5322                         proc_no.Group = 0;
5323                         proc_no.Number = (uint8_t)proc_number;
5324                         proc_no.Reserved = 0;
5325                         if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
5326                         {
5327                             heap_select::set_numa_node_for_heap(heap_number, node_no);
5328                         }
5329                     }
5330                     return;
5331                 }
5332                 bit_number++;
5333             }
5334             proc_number++;
5335         }
5336     }
5337 }
5338
5339 bool gc_heap::create_gc_thread ()
5340 {
5341     dprintf (3, ("Creating gc thread\n"));
5342     return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC");
5343 }
5344
5345 #ifdef _MSC_VER
5346 #pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
5347 #endif //_MSC_VER
5348 void gc_heap::gc_thread_function ()
5349 {
5350     assert (gc_done_event.IsValid());
5351     assert (gc_start_event.IsValid());
5352     dprintf (3, ("gc thread started"));
5353
5354     heap_select::init_cpu_mapping(this, heap_number);
5355
5356     while (1)
5357     {
5358         assert (!gc_t_join.joined());
5359
5360         if (heap_number == 0)
5361         {
5362             gc_heap::ee_suspend_event.Wait(INFINITE, FALSE);
5363
5364             BEGIN_TIMING(suspend_ee_during_log);
5365             GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
5366             END_TIMING(suspend_ee_during_log);
5367
5368             proceed_with_gc_p = TRUE;
5369
5370             if (!should_proceed_with_gc())
5371             {
5372                 update_collection_counts_for_no_gc();
5373                 proceed_with_gc_p = FALSE;
5374             }
5375             else
5376             {
5377                 settings.init_mechanisms();
5378                 gc_start_event.Set();
5379             }
5380             dprintf (3, ("%d gc thread waiting...", heap_number));
5381         }
5382         else
5383         {
5384             gc_start_event.Wait(INFINITE, FALSE);
5385             dprintf (3, ("%d gc thread waiting... Done", heap_number));
5386         }
5387
5388         assert ((heap_number == 0) || proceed_with_gc_p);
5389
5390         if (proceed_with_gc_p)
5391             garbage_collect (GCHeap::GcCondemnedGeneration);
5392
5393         if (heap_number == 0)
5394         {
5395             if (proceed_with_gc_p && (!settings.concurrent))
5396             {
5397                 do_post_gc();
5398             }
5399
5400 #ifdef BACKGROUND_GC
5401             recover_bgc_settings();
5402 #endif //BACKGROUND_GC
5403
5404 #ifdef MULTIPLE_HEAPS
5405             for (int i = 0; i < gc_heap::n_heaps; i++)
5406             {
5407                 gc_heap* hp = gc_heap::g_heaps[i];
5408                 hp->add_saved_spinlock_info (me_release, mt_block_gc);
5409                 dprintf (SPINLOCK_LOG, ("[%d]GC Lmsl", i));
5410                 leave_spin_lock(&hp->more_space_lock);
5411             }
5412 #endif //MULTIPLE_HEAPS
5413
5414             gc_heap::gc_started = FALSE;
5415
5416             BEGIN_TIMING(restart_ee_during_log);
5417             GCToEEInterface::RestartEE(TRUE);
5418             END_TIMING(restart_ee_during_log);
5419             process_sync_log_stats();
5420
5421             dprintf (SPINLOCK_LOG, ("GC Lgc"));
5422             leave_spin_lock (&gc_heap::gc_lock);
5423
5424             gc_heap::internal_gc_done = true;
5425
5426             if (proceed_with_gc_p)
5427                 set_gc_done();
5428             else
5429             {
5430                 // If we didn't actually do a GC, it means we didn't wake up the other threads;
5431                 // we still need to set the gc_done_event for those threads.
5432                 for (int i = 0; i < gc_heap::n_heaps; i++)
5433                 {
5434                     gc_heap* hp = gc_heap::g_heaps[i];
5435                     hp->set_gc_done();
5436                 }
5437             }
5438         }
5439         else
5440         {
5441             int spin_count = 32 * (gc_heap::n_heaps - 1);
5442
5443             // wait until RestartEE has progressed to a stage where we can restart user threads
5444             while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads())
5445             {
5446                 spin_and_switch (spin_count, (gc_heap::internal_gc_done || GCHeap::SafeToRestartManagedThreads()));
5447             }
5448             set_gc_done();
5449         }
5450     }
5451 }
5452 #ifdef _MSC_VER
5453 #pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
5454 #endif //_MSC_VER
5455
5456 #endif //MULTIPLE_HEAPS
5457
5458 bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
5459 {
5460 #if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
5461     // Currently there is no way for us to specify the NUMA node to allocate on via the hosting
5462     // interfaces to a host. This will need to be added later.
5463 #if !defined(FEATURE_CORECLR)
5464     if (!CLRMemoryHosted())
5465 #endif
5466     {
5467         if (GCToOSInterface::CanEnableGCNumaAware())
5468         {
5469             uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
5470             if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
5471                 return true;
5472         }
5473     }
5474 #else
5475     UNREFERENCED_PARAMETER(h_number);
5476 #endif
5477
5478     //NUMA awareness not enabled, or the call failed --> fall back to VirtualCommit()
5479     return GCToOSInterface::VirtualCommit(addr, size);
5480 }
5481
5482 #ifndef SEG_MAPPING_TABLE
5483 inline
5484 heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p)
5485 {
5486     uint8_t* sadd = add;
5487     heap_segment* hs = 0;
5488     heap_segment* hs1 = 0;
5489     if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
5490     {
5491         delta = 0;
5492         return 0;
5493     }
5494     //repeat in case there is a concurrent insertion in the table.
5495     do
5496     {
5497         hs = hs1;
5498         sadd = add;
5499         seg_table->lookup (sadd);
5500         hs1 = (heap_segment*)sadd;
5501     } while (hs1 && !in_range_for_segment (add, hs1) && (hs != hs1));
5502
5503     hs = hs1;
5504
5505     if ((hs == 0) ||
5506         (verify_p && (add > heap_segment_reserved ((heap_segment*)(sadd + delta)))))
5507         delta = 0;
5508     return hs;
5509 }
5510 #endif //SEG_MAPPING_TABLE
5511
5512 class mark
5513 {
5514 public:
5515     uint8_t* first;
5516     size_t len;
5517
5518     // If we want to save space we can have a pool of plug_and_gap's instead of 
5519     // always having 2 allocated for each pinned plug.
5520     gap_reloc_pair saved_pre_plug;
5521     // If we decide to not compact, we need to restore the original values.
5522     gap_reloc_pair saved_pre_plug_reloc;
5523
5524     gap_reloc_pair saved_post_plug;
5525
5526     // Supposedly pinned objects cannot have references, but we are seeing some from pinvoke
5527     // frames. Also, if it's an artificially pinned plug created by us, it can certainly
5528     // have references.
5529     // We know these cases will be rare, so we can optimize this to be allocated only on demand.
5530     gap_reloc_pair saved_post_plug_reloc;
5531
5532     // We need to calculate this after we are done with plan phase and before compact
5533     // phase because compact phase will change the bricks so relocate_address will no 
5534     // longer work.
5535     uint8_t* saved_pre_plug_info_reloc_start;
5536
5537     // We need to save this because we will have no way to calculate it, unlike the 
5538     // pre plug info start which is right before this plug.
5539     uint8_t* saved_post_plug_info_start;
5540
5541 #ifdef SHORT_PLUGS
5542     uint8_t* allocation_context_start_region;
5543 #endif //SHORT_PLUGS
5544
5545     // How the bits in these bytes are organized:
5546     // MSB --> LSB
5547     // bit to indicate whether it's a short obj | 3 bits for refs in this short obj | 2 unused bits | bit to indicate if it's collectible | last bit
5548     // last bit indicates if there's pre or post info associated with this plug. If it's not set all other bits will be 0.
5549     BOOL saved_pre_p;
5550     BOOL saved_post_p;
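    // Illustrative layout note (an assumption inferred from the accessors below,
    // not an upstream comment): with 64-bit pointers gap_reloc_pair holds 3
    // pointers, so on a 32-bit BOOL the bits used are roughly:
    //   bit 31      - short-object flag (set_pre_short / set_post_short)
    //   bits 28..30 - one bit per reference slot in the short object
    //   bit 1       - collectible flag (COLLECTIBLE_CLASS builds only)
    //   bit 0       - set when the plug has pre/post info saved at all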
5551
5552 #ifdef _DEBUG
5553     // We are seeing this is getting corrupted for a PP with a NP after.
5554     // Save it when we first set it and make sure it doesn't change.
5555     gap_reloc_pair saved_post_plug_debug;
5556 #endif //_DEBUG
5557
5558     size_t get_max_short_bits()
5559     {
5560         return (sizeof (gap_reloc_pair) / sizeof (uint8_t*));
5561     }
5562
5563     // pre bits
5564     size_t get_pre_short_start_bit ()
5565     {
5566         return (sizeof (saved_pre_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
5567     }
5568
5569     BOOL pre_short_p()
5570     {
5571         return (saved_pre_p & (1 << (sizeof (saved_pre_p) * 8 - 1)));
5572     }
5573
5574     void set_pre_short()
5575     {
5576         saved_pre_p |= (1 << (sizeof (saved_pre_p) * 8 - 1));
5577     }
5578
5579     void set_pre_short_bit (size_t bit)
5580     {
5581         saved_pre_p |= 1 << (get_pre_short_start_bit() + bit);
5582     }
5583
5584     BOOL pre_short_bit_p (size_t bit)
5585     {
5586         return (saved_pre_p & (1 << (get_pre_short_start_bit() + bit)));
5587     }
5588
5589 #ifdef COLLECTIBLE_CLASS
5590     void set_pre_short_collectible()
5591     {
5592         saved_pre_p |= 2;
5593     }
5594
5595     BOOL pre_short_collectible_p()
5596     {
5597         return (saved_pre_p & 2);
5598     }
5599 #endif //COLLECTIBLE_CLASS
5600
5601     // post bits
5602     size_t get_post_short_start_bit ()
5603     {
5604         return (sizeof (saved_post_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
5605     }
5606
5607     BOOL post_short_p()
5608     {
5609         return (saved_post_p & (1 << (sizeof (saved_post_p) * 8 - 1)));
5610     }
5611
5612     void set_post_short()
5613     {
5614         saved_post_p |= (1 << (sizeof (saved_post_p) * 8 - 1));
5615     }
5616
5617     void set_post_short_bit (size_t bit)
5618     {
5619         saved_post_p |= 1 << (get_post_short_start_bit() + bit);
5620     }
5621
5622     BOOL post_short_bit_p (size_t bit)
5623     {
5624         return (saved_post_p & (1 << (get_post_short_start_bit() + bit)));
5625     }
5626
5627 #ifdef COLLECTIBLE_CLASS
5628     void set_post_short_collectible()
5629     {
5630         saved_post_p |= 2;
5631     }
5632
5633     BOOL post_short_collectible_p()
5634     {
5635         return (saved_post_p & 2);
5636     }
5637 #endif //COLLECTIBLE_CLASS
5638
5639     uint8_t* get_plug_address() { return first; }
5640
5641     BOOL has_pre_plug_info() { return saved_pre_p; }
5642     BOOL has_post_plug_info() { return saved_post_p; }
5643
5644     gap_reloc_pair* get_pre_plug_reloc_info() { return &saved_pre_plug_reloc; }
5645     gap_reloc_pair* get_post_plug_reloc_info() { return &saved_post_plug_reloc; }
5646     void set_pre_plug_info_reloc_start (uint8_t* reloc) { saved_pre_plug_info_reloc_start = reloc; }
5647     uint8_t* get_post_plug_info_start() { return saved_post_plug_info_start; }
5648
5649     // We need to temporarily recover the shortened plugs for compact phase so we can
5650     // copy over the whole plug and its related info (mark bits/cards). But we will
5651     // need to set the artificial gap back so compact phase can keep reading the plug info.
5652     // We also need to recover the saved info because we'll need to recover it later.
5653     // 
5654     // So we would call swap_p*_plug_and_saved once to recover the object info; then call 
5655     // it again to recover the artificial gap.
5656     void swap_pre_plug_and_saved()
5657     {
5658         gap_reloc_pair temp;
5659         memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
5660         memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
5661         saved_pre_plug_reloc = temp;
5662     }
5663
5664     void swap_post_plug_and_saved()
5665     {
5666         gap_reloc_pair temp;
5667         memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
5668         memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
5669         saved_post_plug_reloc = temp;
5670     }
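    // Hedged usage sketch (the real call sites elsewhere in this file may differ):
    // during compact the typical pattern is to call the swap twice, e.g.
    //
    //     m->swap_pre_plug_and_saved();   // 1st call: original object bytes back in place
    //     // ... copy the whole plug and its mark bits / cards ...
    //     m->swap_pre_plug_and_saved();   // 2nd call: artificial gap restored
    //
    // Each call just exchanges the three-pointer region in front of the plug with
    // saved_pre_plug_reloc, so two calls leave both copies as they started.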
5671
5672     void swap_pre_plug_and_saved_for_profiler()
5673     {
5674         gap_reloc_pair temp;
5675         memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
5676         memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
5677         saved_pre_plug = temp;
5678     }
5679
5680     void swap_post_plug_and_saved_for_profiler()
5681     {
5682         gap_reloc_pair temp;
5683         memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
5684         memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
5685         saved_post_plug = temp;
5686     }
5687
5688     // We should think about whether it's really necessary to copy back the pre plug
5689     // info since it was already copied during compacting plugs. But if a plug doesn't move
5690     // by at least 3 pointer sizes (the size of gap_reloc_pair), we'd have to recover the pre plug info.
5691     void recover_plug_info() 
5692     {
5693         if (saved_pre_p)
5694         {
5695             if (gc_heap::settings.compaction)
5696             {
5697                 dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", 
5698                     first,
5699                     &saved_pre_plug_reloc, 
5700                     saved_pre_plug_info_reloc_start));
5701                 memcpy (saved_pre_plug_info_reloc_start, &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
5702             }
5703             else
5704             {
5705                 dprintf (3, ("%Ix: REC Pre: %Ix-%Ix", 
5706                     first,
5707                     &saved_pre_plug, 
5708                     (first - sizeof (plug_and_gap))));
5709                 memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
5710             }
5711         }
5712
5713         if (saved_post_p)
5714         {
5715             if (gc_heap::settings.compaction)
5716             {
5717                 dprintf (3, ("%Ix: REC Post: %Ix-%Ix", 
5718                     first,
5719                     &saved_post_plug_reloc, 
5720                     saved_post_plug_info_start));
5721                 memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
5722             }
5723             else
5724             {
5725                 dprintf (3, ("%Ix: REC Post: %Ix-%Ix", 
5726                     first,
5727                     &saved_post_plug, 
5728                     saved_post_plug_info_start));
5729                 memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
5730             }
5731         }
5732     }
5733 };
5734
5735
5736 void gc_mechanisms::init_mechanisms()
5737 {
5738     condemned_generation = 0;
5739     promotion = FALSE;//TRUE;
5740     compaction = TRUE;
5741 #ifdef FEATURE_LOH_COMPACTION
5742     loh_compaction = gc_heap::should_compact_loh();
5743 #else
5744     loh_compaction = FALSE;
5745 #endif //FEATURE_LOH_COMPACTION
5746     heap_expansion = FALSE;
5747     concurrent = FALSE;
5748     demotion = FALSE;
5749     elevation_reduced = FALSE;
5750     found_finalizers = FALSE;
5751 #ifdef BACKGROUND_GC
5752     background_p = recursive_gc_sync::background_running_p() != FALSE;
5753     allocations_allowed = TRUE;
5754 #endif //BACKGROUND_GC
5755
5756     entry_memory_load = 0;
5757     exit_memory_load = 0;
5758
5759 #ifdef STRESS_HEAP
5760     stress_induced = FALSE;
5761 #endif // STRESS_HEAP
5762 }
5763
5764 void gc_mechanisms::first_init()
5765 {
5766     gc_index = 0;
5767     gen0_reduction_count = 0;
5768     should_lock_elevation = FALSE;
5769     elevation_locked_count = 0;
5770     reason = reason_empty;
5771 #ifdef BACKGROUND_GC
5772     pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch;
5773 #ifdef _DEBUG
5774     int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode());
5775     if (debug_pause_mode >= 0)
5776     {
5777         assert (debug_pause_mode <= pause_sustained_low_latency);
5778         pause_mode = (gc_pause_mode)debug_pause_mode;
5779     }
5780 #endif //_DEBUG
5781 #else //BACKGROUND_GC
5782     pause_mode = pause_batch;
5783 #endif //BACKGROUND_GC
5784
5785     init_mechanisms();
5786 }
5787
5788 void gc_mechanisms::record (gc_history_global* history)
5789 {
5790 #ifdef MULTIPLE_HEAPS
5791     history->num_heaps = gc_heap::n_heaps;
5792 #else
5793     history->num_heaps = 1;
5794 #endif //MULTIPLE_HEAPS
5795
5796     history->condemned_generation = condemned_generation;
5797     history->gen0_reduction_count = gen0_reduction_count;
5798     history->reason = reason;
5799     history->pause_mode = (int)pause_mode;
5800     history->mem_pressure = entry_memory_load;
5801     history->global_mechanims_p = 0;
5802
5803     // start setting the boolean values.
5804     if (concurrent)
5805         history->set_mechanism_p (global_concurrent);
5806     
5807     if (compaction)
5808         history->set_mechanism_p (global_compaction);
5809
5810     if (promotion)
5811         history->set_mechanism_p (global_promotion);
5812     
5813     if (demotion)
5814         history->set_mechanism_p (global_demotion);
5815
5816     if (card_bundles)
5817         history->set_mechanism_p (global_card_bundles);
5818
5819     if (elevation_reduced)
5820         history->set_mechanism_p (global_elevation);
5821 }
5822
5823 /**********************************
5824    called at the beginning of GC to fix the allocated size to
5825    what is really allocated, or to turn the free area into an unused object.
5826    It needs to be called after all of the other allocation contexts have been
5827    fixed since it relies on alloc_allocated.
5828  ********************************/
5829
5830 //for_gc_p indicates that the work is being done for GC,
5831 //as opposed to concurrent heap verification
5832 void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
5833 {
5834     UNREFERENCED_PARAMETER(for_gc_p);
5835
5836     // The gen 0 alloc context is never used for allocation in the allocator path. It's
5837     // still used in the allocation path during GCs.
5838     assert (generation_allocation_pointer (youngest_generation) == nullptr);
5839     assert (generation_allocation_limit (youngest_generation) == nullptr);
5840     heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
5841 }
5842
5843 void gc_heap::fix_large_allocation_area (BOOL for_gc_p)
5844 {
5845     UNREFERENCED_PARAMETER(for_gc_p);
5846
5847 #ifdef _DEBUG
5848     alloc_context* acontext = 
5849 #endif // _DEBUG
5850         generation_alloc_context (large_object_generation);
5851     assert (acontext->alloc_ptr == 0);
5852     assert (acontext->alloc_limit == 0); 
5853 #if 0
5854     dprintf (3, ("Large object alloc context: ptr: %Ix, limit %Ix",
5855                  (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
5856     fix_allocation_context (acontext, FALSE, get_alignment_constant (FALSE));
5857     if (for_gc_p)
5858     {
5859         acontext->alloc_ptr = 0;
5860         acontext->alloc_limit = acontext->alloc_ptr;
5861     }
5862 #endif //0
5863 }
5864
5865 //for_gc_p indicates that the work is being done for GC,
5866 //as opposed to concurrent heap verification
5867 void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
5868                                       int align_const)
5869 {
5870     dprintf (3, ("Fixing allocation context %Ix: ptr: %Ix, limit: %Ix",
5871                  (size_t)acontext,
5872                  (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
5873
5874     if (((size_t)(alloc_allocated - acontext->alloc_limit) > Align (min_obj_size, align_const)) ||
5875         !for_gc_p)
5876     {
5877         uint8_t*  point = acontext->alloc_ptr;
5878         if (point != 0)
5879         {
5880             size_t  size = (acontext->alloc_limit - acontext->alloc_ptr);
5881             // the allocation area was from the free list
5882             // it was shortened by Align (min_obj_size) to make room for
5883             // at least the shortest unused object
5884             size += Align (min_obj_size, align_const);
5885             assert ((size >= Align (min_obj_size)));
5886
5887             dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point,
5888                        (size_t)point + size ));
5889             make_unused_array (point, size);
5890
5891             if (for_gc_p)
5892             {
5893                 generation_free_obj_space (generation_of (0)) += size;
5894                 alloc_contexts_used ++;
5895             }
5896         }
5897     }
5898     else if (for_gc_p)
5899     {
5900         alloc_allocated = acontext->alloc_ptr;
5901         assert (heap_segment_allocated (ephemeral_heap_segment) <=
5902                 heap_segment_committed (ephemeral_heap_segment));
5903         alloc_contexts_used ++;
5904     }
5905
5906     if (for_gc_p)
5907     {
5908         // We need to update the alloc_bytes to reflect the portion that we have not used  
5909         acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr);  
5910         acontext->alloc_ptr = 0;
5911         acontext->alloc_limit = acontext->alloc_ptr;
5912     }
5913 }
5914
5915 //used by the heap verification for concurrent gc.
5916 //it nulls out the words set by fix_allocation_context for heap_verification
5917 void repair_allocation (gc_alloc_context* acontext, void*)
5918 {
5919     uint8_t*  point = acontext->alloc_ptr;
5920
5921     if (point != 0)
5922     {
5923         dprintf (3, ("Clearing [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
5924                      (size_t)acontext->alloc_limit+Align(min_obj_size)));
5925         memclr (acontext->alloc_ptr - plug_skew,
5926                 (acontext->alloc_limit - acontext->alloc_ptr)+Align (min_obj_size));
5927     }
5928 }
5929
5930 void void_allocation (gc_alloc_context* acontext, void*)
5931 {
5932     uint8_t*  point = acontext->alloc_ptr;
5933
5934     if (point != 0)
5935     {
5936         dprintf (3, ("Void [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
5937                      (size_t)acontext->alloc_limit+Align(min_obj_size)));
5938         acontext->alloc_ptr = 0;
5939         acontext->alloc_limit = acontext->alloc_ptr;
5940     }
5941 }
5942
5943 void gc_heap::repair_allocation_contexts (BOOL repair_p)
5944 {
5945     GCToEEInterface::GcEnumAllocContexts (repair_p ? repair_allocation : void_allocation, NULL);
5946 }
5947
5948 struct fix_alloc_context_args
5949 {
5950     BOOL for_gc_p;
5951     void* heap;
5952 };
5953
5954 void fix_alloc_context(gc_alloc_context* acontext, void* param)
5955 {
5956     fix_alloc_context_args* args = (fix_alloc_context_args*)param;
5957     g_theGCHeap->FixAllocContext(acontext, false, (void*)(size_t)(args->for_gc_p), args->heap);
5958 }
5959
5960 void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
5961 {
5962     fix_alloc_context_args args;
5963     args.for_gc_p = for_gc_p;
5964     args.heap = __this;
5965
5966     GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
5967     fix_youngest_allocation_area(for_gc_p);
5968     fix_large_allocation_area(for_gc_p);
5969 }
5970
5971 void gc_heap::fix_older_allocation_area (generation* older_gen)
5972 {
5973     heap_segment* older_gen_seg = generation_allocation_segment (older_gen);
5974     if (generation_allocation_limit (older_gen) !=
5975         heap_segment_plan_allocated (older_gen_seg))
5976     {
5977         uint8_t*  point = generation_allocation_pointer (older_gen);
5978
5979         size_t  size = (generation_allocation_limit (older_gen) -
5980                                generation_allocation_pointer (older_gen));
5981         if (size != 0)
5982         {
5983             assert ((size >= Align (min_obj_size)));
5984             dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point+size));
5985             make_unused_array (point, size);
5986         }
5987     }
5988     else
5989     {
5990         assert (older_gen_seg != ephemeral_heap_segment);
5991         heap_segment_plan_allocated (older_gen_seg) =
5992             generation_allocation_pointer (older_gen);
5993         generation_allocation_limit (older_gen) =
5994             generation_allocation_pointer (older_gen);
5995     }
5996 }
5997
5998 void gc_heap::set_allocation_heap_segment (generation* gen)
5999 {
6000     uint8_t* p = generation_allocation_start (gen);
6001     assert (p);
6002     heap_segment* seg = generation_allocation_segment (gen);
6003     if (in_range_for_segment (p, seg))
6004         return;
6005
6006     // try ephemeral heap segment in case of heap expansion
6007     seg = ephemeral_heap_segment;
6008     if (!in_range_for_segment (p, seg))
6009     {
6010         seg = heap_segment_rw (generation_start_segment (gen));
6011
6012         PREFIX_ASSUME(seg != NULL);
6013
6014         while (!in_range_for_segment (p, seg))
6015         {
6016             seg = heap_segment_next_rw (seg);
6017             PREFIX_ASSUME(seg != NULL);
6018         }
6019     }
6020
6021     generation_allocation_segment (gen) = seg;
6022 }
6023
6024 void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start)
6025 {
6026     assert (start);
6027     assert (Align ((size_t)start) == (size_t)start);
6028     generation_allocation_start (gen) = start;
6029     generation_allocation_pointer (gen) =  0;//start + Align (min_obj_size);
6030     generation_allocation_limit (gen) = 0;//generation_allocation_pointer (gen);
6031     set_allocation_heap_segment (gen);
6032 }
6033
6034 #ifdef BACKGROUND_GC
6035 //TODO BACKGROUND_GC this is for test only
6036 void
6037 gc_heap::disallow_new_allocation (int gen_number)
6038 {
6039     UNREFERENCED_PARAMETER(gen_number);
6040     settings.allocations_allowed = FALSE;
6041 }
6042 void
6043 gc_heap::allow_new_allocation (int gen_number)
6044 {
6045     UNREFERENCED_PARAMETER(gen_number);
6046     settings.allocations_allowed = TRUE;
6047 }
6048
6049 #endif //BACKGROUND_GC
6050
6051 bool gc_heap::new_allocation_allowed (int gen_number)
6052 {
6053 #ifdef BACKGROUND_GC
6054     //TODO BACKGROUND_GC this is for test only
6055     if (!settings.allocations_allowed)
6056     {
6057         dprintf (2, ("new allocation not allowed"));
6058         return FALSE;
6059     }
6060 #endif //BACKGROUND_GC
6061
6062     if (dd_new_allocation (dynamic_data_of (gen_number)) < 0)
6063     {
6064         if (gen_number != 0)
6065         {
6066             // For LOH we will give it more budget before we try a GC.
6067             if (settings.concurrent)
6068             {
6069                 dynamic_data* dd2 = dynamic_data_of (max_generation + 1 );
6070
6071                 if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2)))
6072                 {
6073                     return TRUE;
6074                 }
6075             }
6076         }
6077         return FALSE;
6078     }
6079 #ifndef MULTIPLE_HEAPS
6080     else if ((settings.pause_mode != pause_no_gc) && (gen_number == 0))
6081     {
6082         dprintf (3, ("evaluating allocation rate"));
6083         dynamic_data* dd0 = dynamic_data_of (0);
6084         if ((allocation_running_amount - dd_new_allocation (dd0)) >
6085             dd_min_size (dd0))
6086         {
6087             uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp();
6088             if ((ctime - allocation_running_time) > 1000)
6089             {
6090                 dprintf (2, (">1s since last gen0 gc"));
6091                 return FALSE;
6092             }
6093             else
6094             {
6095                 allocation_running_amount = dd_new_allocation (dd0);
6096             }
6097         }
6098     }
6099 #endif //MULTIPLE_HEAPS
6100     return TRUE;
6101 }
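// Purely mechanical illustration of the LOH branch above (numbers are made up):
// with dd_desired_allocation for gen (max_generation + 1) at 100MB and a
// concurrent GC in progress, dd_new_allocation of -250MB satisfies
// -250MB <= -2 * 100MB and the function returns TRUE, while -150MB does not and
// the function falls through to return FALSE.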
6102
6103 inline
6104 ptrdiff_t gc_heap::get_desired_allocation (int gen_number)
6105 {
6106     return dd_desired_allocation (dynamic_data_of (gen_number));
6107 }
6108
6109 inline
6110 ptrdiff_t  gc_heap::get_new_allocation (int gen_number)
6111 {
6112     return dd_new_allocation (dynamic_data_of (gen_number));
6113 }
6114
6115 //return the amount allocated so far in gen_number
6116 inline
6117 ptrdiff_t  gc_heap::get_allocation (int gen_number)
6118 {
6119     dynamic_data* dd = dynamic_data_of (gen_number);
6120
6121     return dd_desired_allocation (dd) - dd_new_allocation (dd);
6122 }
6123
6124 inline
6125 BOOL grow_mark_stack (mark*& m, size_t& len, size_t init_len)
6126 {
6127     size_t new_size = max (init_len, 2*len);
6128     mark* tmp = new (nothrow) mark [new_size];
6129     if (tmp)
6130     {
6131         memcpy (tmp, m, len * sizeof (mark));
6132         delete [] m; // m was allocated with new[], so use array delete
6133         m = tmp;
6134         len = new_size;
6135         return TRUE;
6136     }
6137     else
6138     {
6139         dprintf (1, ("Failed to allocate %Id bytes for mark stack", (len * sizeof (mark))));
6140         return FALSE;
6141     }
6142 }
6143
6144 inline
6145 uint8_t* pinned_plug (mark* m)
6146 {
6147    return m->first;
6148 }
6149
6150 inline
6151 size_t& pinned_len (mark* m)
6152 {
6153     return m->len;
6154 }
6155
6156 inline
6157 void set_new_pin_info (mark* m, uint8_t* pin_free_space_start)
6158 {
6159     m->len = pinned_plug (m) - pin_free_space_start;
6160 #ifdef SHORT_PLUGS
6161     m->allocation_context_start_region = pin_free_space_start;
6162 #endif //SHORT_PLUGS
6163 }
6164
6165 #ifdef SHORT_PLUGS
6166 inline
6167 uint8_t*& pin_allocation_context_start_region (mark* m)
6168 {
6169     return m->allocation_context_start_region;
6170 }
6171
6172 uint8_t* get_plug_start_in_saved (uint8_t* old_loc, mark* pinned_plug_entry)
6173 {
6174     uint8_t* saved_pre_plug_info = (uint8_t*)(pinned_plug_entry->get_pre_plug_reloc_info());
6175     uint8_t* plug_start_in_saved = saved_pre_plug_info + (old_loc - (pinned_plug (pinned_plug_entry) - sizeof (plug_and_gap)));
6176     //dprintf (1, ("detected a very short plug: %Ix before PP %Ix, pad %Ix", 
6177     //    old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
6178     dprintf (1, ("EP: %Ix(%Ix), %Ix", old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
6179     return plug_start_in_saved;
6180 }
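// Hedged arithmetic example for get_plug_start_in_saved (addresses are made up,
// and it assumes 64-bit pointers where sizeof (plug_and_gap) is 3 pointers = 0x18):
// if the pinned plug starts at 0x1000 and old_loc is 0xff0 (a very short plug that
// lives inside the saved pre-plug area), then
//     old_loc - (pinned_plug - 0x18) = 0xff0 - 0xfe8 = 0x8
// so the returned pointer is 8 bytes into the saved copy of the pre-plug info
// rather than the overwritten heap location.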
6181
6182 inline
6183 void set_padding_in_expand (uint8_t* old_loc,
6184                             BOOL set_padding_on_saved_p,
6185                             mark* pinned_plug_entry)
6186 {
6187     if (set_padding_on_saved_p)
6188     {
6189         set_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
6190     }
6191     else
6192     {
6193         set_plug_padded (old_loc);
6194     }
6195 }
6196
6197 inline
6198 void clear_padding_in_expand (uint8_t* old_loc,
6199                               BOOL set_padding_on_saved_p,
6200                               mark* pinned_plug_entry)
6201 {
6202     if (set_padding_on_saved_p)
6203     {
6204         clear_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
6205     }
6206     else
6207     {
6208         clear_plug_padded (old_loc);
6209     }
6210 }
6211 #endif //SHORT_PLUGS
6212
6213 void gc_heap::reset_pinned_queue()
6214 {
6215     mark_stack_tos = 0;
6216     mark_stack_bos = 0;
6217 }
6218
6219 void gc_heap::reset_pinned_queue_bos()
6220 {
6221     mark_stack_bos = 0;
6222 }
6223
6224 // last_pinned_plug is only for assertion purposes.
6225 void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size)
6226 {
6227     if (last_pinned_plug)
6228     {
6229         mark& last_m = mark_stack_array[mark_stack_tos - 1];
6230         assert (last_pinned_plug == last_m.first);
6231         if (last_m.saved_post_p)
6232         {
6233             last_m.saved_post_p = FALSE;
6234             dprintf (3, ("setting last plug %Ix post to false", last_m.first));
6235             // We need to recover what the gap has overwritten.
6236             memcpy ((last_m.first + last_m.len - sizeof (plug_and_gap)), &(last_m.saved_post_plug), sizeof (gap_reloc_pair));
6237         }
6238         last_m.len += plug_size;
6239         dprintf (3, ("recovered the last part of plug %Ix, setting its plug size to %Ix", last_m.first, last_m.len));
6240     }
6241 }
6242
6243 void gc_heap::set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit)
6244 {
6245     dprintf (3, ("sanp: ptr: %Ix, limit: %Ix", alloc_pointer, alloc_limit));
6246     dprintf (3, ("oldest %Id: %Ix", mark_stack_bos, pinned_plug (oldest_pin())));
6247     if (!(pinned_plug_que_empty_p()))
6248     {
6249         mark*  oldest_entry = oldest_pin();
6250         uint8_t* plug = pinned_plug (oldest_entry);
6251         if ((plug >= alloc_pointer) && (plug < alloc_limit))
6252         {
6253             alloc_limit = pinned_plug (oldest_entry);
6254             dprintf (3, ("now setting alloc context: %Ix->%Ix(%Id)",
6255                 alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer)));
6256         }
6257     }
6258 }
6259
6260 void gc_heap::set_allocator_next_pin (generation* gen)
6261 {
6262     dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen)));
6263     if (!(pinned_plug_que_empty_p()))
6264     {
6265         mark*  oldest_entry = oldest_pin();
6266         uint8_t* plug = pinned_plug (oldest_entry);
6267         if ((plug >= generation_allocation_pointer (gen)) &&
6268             (plug <  generation_allocation_limit (gen)))
6269         {
6270             generation_allocation_limit (gen) = pinned_plug (oldest_entry);
6271             dprintf (3, ("SANP: get next pin free space in gen%d for alloc: %Ix->%Ix(%Id)", 
6272                 gen->gen_num,
6273                 generation_allocation_pointer (gen), generation_allocation_limit (gen),
6274                 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
6275         }
6276         else
6277             assert (!((plug < generation_allocation_pointer (gen)) &&
6278                       (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
6279     }
6280 }
6281
6282 // After we set the info, we increase tos.
6283 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
6284 {
6285     UNREFERENCED_PARAMETER(last_pinned_plug);
6286
6287     mark& m = mark_stack_array[mark_stack_tos];
6288     assert (m.first == last_pinned_plug);
6289
6290     m.len = plug_len;
6291     mark_stack_tos++;
6292     set_allocator_next_pin (alloc_pointer, alloc_limit);
6293 }
6294
6295 // After we set the info, we increase tos.
6296 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
6297 {
6298     UNREFERENCED_PARAMETER(last_pinned_plug);
6299
6300     mark& m = mark_stack_array[mark_stack_tos];
6301     assert (m.first == last_pinned_plug);
6302
6303     m.len = plug_len;
6304     mark_stack_tos++;
6305     assert (gen != 0);
6306     // Why are we checking here? gen is never 0.
6307     if (gen != 0)
6308     {
6309         set_allocator_next_pin (gen);
6310     }
6311 }
6312
6313 size_t gc_heap::deque_pinned_plug ()
6314 {
6315     dprintf (3, ("dequed: %Id", mark_stack_bos));
6316     size_t m = mark_stack_bos;
6317     mark_stack_bos++;
6318     return m;
6319 }
6320
6321 inline
6322 mark* gc_heap::pinned_plug_of (size_t bos)
6323 {
6324     return &mark_stack_array [ bos ];
6325 }
6326
6327 inline
6328 mark* gc_heap::oldest_pin ()
6329 {
6330     return pinned_plug_of (mark_stack_bos);
6331 }
6332
6333 inline
6334 BOOL gc_heap::pinned_plug_que_empty_p ()
6335 {
6336     return (mark_stack_bos == mark_stack_tos);
6337 }
6338
6339 inline
6340 mark* gc_heap::before_oldest_pin()
6341 {
6342     if (mark_stack_bos >= 1)
6343         return pinned_plug_of (mark_stack_bos-1);
6344     else
6345         return 0;
6346 }
6347
6348 inline
6349 BOOL gc_heap::ephemeral_pointer_p (uint8_t* o)
6350 {
6351     return ((o >= ephemeral_low) && (o < ephemeral_high));
6352 }
6353
6354 #ifdef MH_SC_MARK
6355 inline
6356 int& gc_heap::mark_stack_busy()
6357 {
6358     return  g_mark_stack_busy [(heap_number+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
6359 }
6360 #endif //MH_SC_MARK
6361
6362 void gc_heap::make_mark_stack (mark* arr)
6363 {
6364     reset_pinned_queue();
6365     mark_stack_array = arr;
6366     mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
6367 #ifdef MH_SC_MARK
6368     mark_stack_busy() = 0;
6369 #endif //MH_SC_MARK
6370 }
6371
6372 #ifdef BACKGROUND_GC
6373 inline
6374 size_t& gc_heap::bpromoted_bytes(int thread)
6375 {
6376 #ifdef MULTIPLE_HEAPS
6377     return g_bpromoted [thread*16];
6378 #else //MULTIPLE_HEAPS
6379     UNREFERENCED_PARAMETER(thread);
6380     return g_bpromoted;
6381 #endif //MULTIPLE_HEAPS
6382 }
6383
6384 void gc_heap::make_background_mark_stack (uint8_t** arr)
6385 {
6386     background_mark_stack_array = arr;
6387     background_mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
6388     background_mark_stack_tos = arr;
6389 }
6390
6391 void gc_heap::make_c_mark_list (uint8_t** arr)
6392 {
6393     c_mark_list = arr;
6394     c_mark_list_index = 0;
6395     c_mark_list_length = 1 + (OS_PAGE_SIZE / MIN_OBJECT_SIZE);
6396 }
6397 #endif //BACKGROUND_GC
6398
6399
6400 #ifdef CARD_BUNDLE
6401
6402 // The card bundle keeps track of groups of card words.
6403 static const size_t card_bundle_word_width = 32;
6404
6405 // Number of card words that a single card bundle bit covers.
6406 static const size_t card_bundle_size = (size_t)(GC_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width));
6407
6408 inline
6409 size_t card_bundle_word (size_t cardb)
6410 {
6411     return cardb / card_bundle_word_width;
6412 }
6413
6414 inline
6415 uint32_t card_bundle_bit (size_t cardb)
6416 {
6417     return (uint32_t)(cardb % card_bundle_word_width);
6418 }
6419
6420 size_t align_cardw_on_bundle (size_t cardw)
6421 {
6422     return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 ));
6423 }
6424
6425 // Get the card bundle representing a card word
6426 size_t cardw_card_bundle (size_t cardw)
6427 {
6428     return cardw / card_bundle_size;
6429 }
6430
6431 // Get the first card word in a card bundle
6432 size_t card_bundle_cardw (size_t cardb)
6433 {
6434     return cardb * card_bundle_size;
6435 }
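// Worked example of the index math above, assuming a GC_PAGE_SIZE of 4096 so that
// card_bundle_size = 4096 / (4 * 32) = 32 card words per bundle bit:
//   card word 100 -> bundle   cardw_card_bundle(100) = 100 / 32 = 3
//   bundle 3      -> word     card_bundle_word(3)    = 3 / 32   = 0
//                 -> bit      card_bundle_bit(3)     = 3 % 32   = 3
//   align_cardw_on_bundle(100) rounds 100 up to 128, the next bundle boundary.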
6436
6437 // Clear the specified card bundle
6438 void gc_heap::card_bundle_clear (size_t cardb)
6439 {
6440     card_bundle_table [card_bundle_word (cardb)] &= ~(1 << card_bundle_bit (cardb));
6441     dprintf (1,("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb),
6442               (size_t)card_bundle_cardw (cardb+1)));
6443 }
6444
6445 void gc_heap::card_bundle_set (size_t cardb)
6446 {
6447     if (!card_bundle_set_p (cardb))
6448     {
6449         card_bundle_table [card_bundle_word (cardb)] |= (1 << card_bundle_bit (cardb));
6450     }
6451 }
6452
6453 // Set the card bundle bits between start_cardb and end_cardb
6454 void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb)
6455 {
6456     if (start_cardb == end_cardb)
6457     {
6458         card_bundle_set(start_cardb);
6459         return;
6460     }
6461
6462     size_t start_word = card_bundle_word (start_cardb);
6463     size_t end_word = card_bundle_word (end_cardb);
6464
6465     if (start_word < end_word)
6466     {
6467         // Set the partial words
6468         card_bundle_table [start_word] |= highbits (~0u, card_bundle_bit (start_cardb));
6469
6470         if (card_bundle_bit (end_cardb))
6471             card_bundle_table [end_word] |= lowbits (~0u, card_bundle_bit (end_cardb));
6472
6473         // Set the full words
6474         for (size_t i = start_word + 1; i < end_word; i++)
6475             card_bundle_table [i] = ~0u;
6476     }
6477     else
6478     {
6479         card_bundle_table [start_word] |= (highbits (~0u, card_bundle_bit (start_cardb)) &
6480                                             lowbits (~0u, card_bundle_bit (end_cardb)));
6481     }
6482 }
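
// Sketch of the split above (assuming highbits(~0u, n) keeps bits n and above
// and lowbits(~0u, n) keeps bits below n): card_bundles_set(5, 70) with 32-bit
// bundle words sets bits 5..31 of word 0, fills word 1 with ~0u, and sets
// bits 0..5 of word 2.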
6483
6484 // Indicates whether the specified bundle is set.
6485 BOOL gc_heap::card_bundle_set_p (size_t cardb)
6486 {
6487     return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb)));
6488 }
6489
6490 // Returns the size (in bytes) of the card bundle table needed to represent the region [from, end[
6491 size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
6492 {
6493     // Number of heap bytes represented by a card bundle word
6494     size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
6495
6496     // Align the start of the region down
6497     from = (uint8_t*)((size_t)from & ~(cbw_span - 1));
6498
6499     // Align the end of the region up
6500     end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1));
6501
6502     // Make sure they're really aligned
6503     assert (((size_t)from & (cbw_span - 1)) == 0);
6504     assert (((size_t)end  & (cbw_span - 1)) == 0);
6505
6506     return ((end - from) / cbw_span) * sizeof (uint32_t);
6507 }
6508
6509 // Takes a pointer to a card bundle table and an address, and returns a pointer that represents
6510 // where a theoretical card bundle table that represents every address (starting from 0) would
6511 // start if the bundle word representing the address were to be located at the pointer passed in.
6512 // The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle
6513 // for a given address is using a simple shift operation on the address.
6514 uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address)
6515 {
6516     // The number of bytes of heap memory represented by a card bundle word
6517     const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
6518
6519     // Each card bundle word is 32 bits
6520     return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t)));
6521 }
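
// Sketch of the translation: if lowest_address falls in bundle word N of a
// hypothetical table that started at address zero, the translated pointer is
// cb minus N words, so indexing it directly with
// (address / heap_bytes_for_bundle_word) lands on the correct word of the
// real table without subtracting lowest_address on the hot path.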
6522
6523 void gc_heap::enable_card_bundles ()
6524 {
6525     if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
6526     {
6527         dprintf (1, ("Enabling card bundles"));
6528
6529         // We initially set all of the card bundles
6530         card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
6531                           cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
6532         settings.card_bundles = TRUE;
6533     }
6534 }
6535
6536 BOOL gc_heap::card_bundles_enabled ()
6537 {
6538     return settings.card_bundles;
6539 }
6540
6541 #endif // CARD_BUNDLE
6542
6543 #if defined (_TARGET_AMD64_)
6544 #define brick_size ((size_t)4096)
6545 #else
6546 #define brick_size ((size_t)2048)
6547 #endif //_TARGET_AMD64_
6548
6549 inline
6550 size_t gc_heap::brick_of (uint8_t* add)
6551 {
6552     return (size_t)(add - lowest_address) / brick_size;
6553 }
6554
6555 inline
6556 uint8_t* gc_heap::brick_address (size_t brick)
6557 {
6558     return lowest_address + (brick_size * brick);
6559 }
6560
6561
6562 void gc_heap::clear_brick_table (uint8_t* from, uint8_t* end)
6563 {
6564     for (size_t i = brick_of (from);i < brick_of (end); i++)
6565         brick_table[i] = 0;
6566 }
6567
6568 //codes for the brick entries:
6569 //entry == 0 -> not assigned
6570 //entry > 0 -> offset is entry-1
6571 //entry < 0 -> jump back -entry bricks
6572
6573
6574 inline
6575 void gc_heap::set_brick (size_t index, ptrdiff_t val)
6576 {
6577     if (val < -32767)
6578     {
6579         val = -32767;
6580     }
6581     assert (val < 32767);
6582     if (val >= 0)
6583         brick_table [index] = (short)val+1;
6584     else
6585         brick_table [index] = (short)val;
6586 }
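
// Illustrative example of the encoding above: set_brick (b, 0x40) stores 0x41
// (offset plus one), set_brick (b, -1) stores -1 ("look one brick back"), and
// an entry of 0 means the brick was never assigned.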
6587
6588 inline
6589 int gc_heap::get_brick_entry (size_t index)
6590 {
6591 #ifdef MULTIPLE_HEAPS
6592     return VolatileLoadWithoutBarrier(&brick_table [index]);
6593 #else
6594     return brick_table[index];
6595 #endif
6596 }
6597
6598
6599 inline
6600 uint8_t* align_on_brick (uint8_t* add)
6601 {
6602     return (uint8_t*)((size_t)(add + brick_size - 1) & ~(brick_size - 1));
6603 }
6604
6605 inline
6606 uint8_t* align_lower_brick (uint8_t* add)
6607 {
6608     return (uint8_t*)(((size_t)add) & ~(brick_size - 1));
6609 }
6610
6611 size_t size_brick_of (uint8_t* from, uint8_t* end)
6612 {
6613     assert (((size_t)from & (brick_size-1)) == 0);
6614     assert (((size_t)end  & (brick_size-1)) == 0);
6615
6616     return ((end - from) / brick_size) * sizeof (short);
6617 }
6618
6619 inline
6620 uint8_t* gc_heap::card_address (size_t card)
6621 {
6622     return  (uint8_t*) (card_size * card);
6623 }
6624
6625 inline
6626 size_t gc_heap::card_of ( uint8_t* object)
6627 {
6628     return (size_t)(object) / card_size;
6629 }
6630
6631 inline
6632 size_t gc_heap::card_to_brick (size_t card)
6633 {
6634     return brick_of (card_address (card));
6635 }
6636
6637 inline
6638 uint8_t* align_on_card (uint8_t* add)
6639 {
6640     return (uint8_t*)((size_t)(add + card_size - 1) & ~(card_size - 1 ));
6641 }
6642 inline
6643 uint8_t* align_on_card_word (uint8_t* add)
6644 {
6645     return (uint8_t*) ((size_t)(add + (card_size*card_word_width)-1) & ~(card_size*card_word_width - 1));
6646 }
6647
6648 inline
6649 uint8_t* align_lower_card (uint8_t* add)
6650 {
6651     return (uint8_t*)((size_t)add & ~(card_size-1));
6652 }
6653
6654 inline
6655 void gc_heap::clear_card (size_t card)
6656 {
6657     card_table [card_word (card)] =
6658         (card_table [card_word (card)] & ~(1 << card_bit (card)));
6659     dprintf (3,("Cleared card %Ix [%Ix, %Ix[", card, (size_t)card_address (card),
6660               (size_t)card_address (card+1)));
6661 }
6662
6663 inline
6664 void gc_heap::set_card (size_t card)
6665 {
6666     size_t word = card_word (card);
6667     card_table[word] = (card_table [word] | (1 << card_bit (card)));
6668
6669 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
6670     // Also set the card bundle that corresponds to the card
6671     size_t bundle_to_set = cardw_card_bundle(word);
6672
6673     card_bundle_set(bundle_to_set);
6674
6675     dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card, (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set));
6676     assert(card_bundle_set_p(bundle_to_set) != 0);
6677 #endif
6678 }
6679
6680 inline
6681 BOOL  gc_heap::card_set_p (size_t card)
6682 {
6683     return ( card_table [ card_word (card) ] & (1 << card_bit (card)));
6684 }
6685
6686 // Returns the number of DWORDs in the card table that cover the
6687 // range of addresses [from, end[.
6688 size_t count_card_of (uint8_t* from, uint8_t* end)
6689 {
6690     return card_word (gcard_of (end - 1)) - card_word (gcard_of (from)) + 1;
6691 }
6692
6693 // Returns the number of bytes to allocate for a card table
6694 // that covers the range of addresses [from, end[.
6695 size_t size_card_of (uint8_t* from, uint8_t* end)
6696 {
6697     return count_card_of (from, end) * sizeof(uint32_t);
6698 }
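
// Worked example (assuming a card_size of 256 bytes): a card-word-aligned 1 MB
// range spans 4096 cards = 128 card words, so size_card_of returns
// 128 * sizeof(uint32_t) = 512 bytes; unaligned ranges also include the words
// straddling either end.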
6699
6700 // We don't store seg_mapping_table in card_table_info because there's always only one view.
6701 class card_table_info
6702 {
6703 public:
6704     unsigned    recount;
6705     uint8_t*    lowest_address;
6706     uint8_t*    highest_address;
6707     short*      brick_table;
6708
6709 #ifdef CARD_BUNDLE
6710     uint32_t*   card_bundle_table;
6711 #endif //CARD_BUNDLE
6712
6713     // mark_array is always at the end of the data structure because we
6714     // want to be able to make one commit call for everything before it.
6715 #ifdef MARK_ARRAY
6716     uint32_t*   mark_array;
6717 #endif //MARK_ARRAY
6718
6719     size_t      size;
6720     uint32_t*   next_card_table;
6721 };
6722
6723 //These are accessors on untranslated cardtable
6724 inline
6725 unsigned& card_table_refcount (uint32_t* c_table)
6726 {
6727     return *(unsigned*)((char*)c_table - sizeof (card_table_info));
6728 }
6729
6730 inline
6731 uint8_t*& card_table_lowest_address (uint32_t* c_table)
6732 {
6733     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->lowest_address;
6734 }
6735
6736 uint32_t* translate_card_table (uint32_t* ct)
6737 {
6738     return (uint32_t*)((uint8_t*)ct - card_word (gcard_of (card_table_lowest_address (ct))) * sizeof(uint32_t));
6739 }
6740
6741 inline
6742 uint8_t*& card_table_highest_address (uint32_t* c_table)
6743 {
6744     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->highest_address;
6745 }
6746
6747 inline
6748 short*& card_table_brick_table (uint32_t* c_table)
6749 {
6750     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->brick_table;
6751 }
6752
6753 #ifdef CARD_BUNDLE
6754 // Get the card bundle table for the specified card table.
6755 inline
6756 uint32_t*& card_table_card_bundle_table (uint32_t* c_table)
6757 {
6758     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->card_bundle_table;
6759 }
6760 #endif //CARD_BUNDLE
6761
6762 #ifdef MARK_ARRAY
6763 /* Support for mark_array */
6764
6765 inline
6766 uint32_t*& card_table_mark_array (uint32_t* c_table)
6767 {
6768     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array;
6769 }
6770
6771 #ifdef BIT64
6772 #define mark_bit_pitch ((size_t)16)
6773 #else
6774 #define mark_bit_pitch ((size_t)8)
6775 #endif // BIT64
6776 #define mark_word_width ((size_t)32)
6777 #define mark_word_size (mark_word_width * mark_bit_pitch)
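
// Worked example: on 64-bit targets one mark bit covers mark_bit_pitch = 16
// bytes, so one 32-bit mark array word covers mark_word_size = 32 * 16 = 512
// bytes of heap; on 32-bit targets the corresponding figures are 8 and 256.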
6778
6779 inline
6780 uint8_t* align_on_mark_bit (uint8_t* add)
6781 {
6782     return (uint8_t*)((size_t)(add + (mark_bit_pitch - 1)) & ~(mark_bit_pitch - 1));
6783 }
6784
6785 inline
6786 uint8_t* align_lower_mark_bit (uint8_t* add)
6787 {
6788     return (uint8_t*)((size_t)(add) & ~(mark_bit_pitch - 1));
6789 }
6790
6791 inline
6792 BOOL is_aligned_on_mark_word (uint8_t* add)
6793 {
6794     return ((size_t)add == ((size_t)(add) & ~(mark_word_size - 1)));
6795 }
6796
6797 inline
6798 uint8_t* align_on_mark_word (uint8_t* add)
6799 {
6800     return (uint8_t*)((size_t)(add + mark_word_size - 1) & ~(mark_word_size - 1));
6801 }
6802
6803 inline
6804 uint8_t* align_lower_mark_word (uint8_t* add)
6805 {
6806     return (uint8_t*)((size_t)(add) & ~(mark_word_size - 1));
6807 }
6808
6809 inline
6810 size_t mark_bit_of (uint8_t* add)
6811 {
6812     return ((size_t)add / mark_bit_pitch);
6813 }
6814
6815 inline
6816 unsigned int mark_bit_bit (size_t mark_bit)
6817 {
6818     return (unsigned int)(mark_bit % mark_word_width);
6819 }
6820
6821 inline
6822 size_t mark_bit_word (size_t mark_bit)
6823 {
6824     return (mark_bit / mark_word_width);
6825 }
6826
6827 inline
6828 size_t mark_word_of (uint8_t* add)
6829 {
6830     return ((size_t)add) / mark_word_size;
6831 }
6832
6833 uint8_t* mark_word_address (size_t wd)
6834 {
6835     return (uint8_t*)(wd*mark_word_size);
6836 }
6837
6838 uint8_t* mark_bit_address (size_t mark_bit)
6839 {
6840     return (uint8_t*)(mark_bit*mark_bit_pitch);
6841 }
6842
6843 inline
6844 size_t mark_bit_bit_of (uint8_t* add)
6845 {
6846     return  (((size_t)add / mark_bit_pitch) % mark_word_width);
6847 }
6848
6849 inline
6850 unsigned int gc_heap::mark_array_marked(uint8_t* add)
6851 {
6852     return mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add));
6853 }
6854
6855 inline
6856 BOOL gc_heap::is_mark_bit_set (uint8_t* add)
6857 {
6858     return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)));
6859 }
6860
6861 inline
6862 void gc_heap::mark_array_set_marked (uint8_t* add)
6863 {
6864     size_t index = mark_word_of (add);
6865     uint32_t val = (1 << mark_bit_bit_of (add));
6866 #ifdef MULTIPLE_HEAPS
6867     Interlocked::Or (&(mark_array [index]), val);
6868 #else
6869     mark_array [index] |= val;
6870 #endif 
6871 }
6872
6873 inline
6874 void gc_heap::mark_array_clear_marked (uint8_t* add)
6875 {
6876     mark_array [mark_word_of (add)] &= ~(1 << mark_bit_bit_of (add));
6877 }
6878
6879 size_t size_mark_array_of (uint8_t* from, uint8_t* end)
6880 {
6881     assert (((size_t)from & ((mark_word_size)-1)) == 0);
6882     assert (((size_t)end  & ((mark_word_size)-1)) == 0);
6883     return sizeof (uint32_t)*(((end - from) / mark_word_size));
6884 }
6885
6886 //In order to eliminate lowest_address from the mark array
6887 //computations (mark_word_of, etc.), mark_array is offset
6888 //according to the lowest_address.
6889 uint32_t* translate_mark_array (uint32_t* ma)
6890 {
6891     return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
6892 }
6893
6894 // from and end must be page aligned addresses. 
6895 void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=TRUE*/
6896 #ifdef FEATURE_BASICFREEZE
6897                                 , BOOL read_only/*=FALSE*/
6898 #endif // FEATURE_BASICFREEZE
6899                                 )
6900 {
6901     if(!gc_can_use_concurrent)
6902         return;
6903
6904 #ifdef FEATURE_BASICFREEZE
6905     if (!read_only)
6906 #endif // FEATURE_BASICFREEZE
6907     {
6908         assert (from == align_on_mark_word (from));
6909     }
6910     assert (end == align_on_mark_word (end));
6911
6912 #ifdef BACKGROUND_GC
6913     uint8_t* current_lowest_address = background_saved_lowest_address;
6914     uint8_t* current_highest_address = background_saved_highest_address;
6915 #else
6916     uint8_t* current_lowest_address = lowest_address;
6917     uint8_t* current_highest_address = highest_address;
6918 #endif //BACKGROUND_GC
6919
6920     //there is a possibility that the addresses are
6921     //outside of the covered range because of a newly allocated
6922     //large object segment
6923     if ((end <= current_highest_address) && (from >= current_lowest_address))
6924     {
6925         size_t beg_word = mark_word_of (align_on_mark_word (from));
6926         MAYBE_UNUSED_VAR(beg_word);
6927         //align end word to make sure to cover the address
6928         size_t end_word = mark_word_of (align_on_mark_word (end));
6929         MAYBE_UNUSED_VAR(end_word);
6930         dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
6931                      (size_t)mark_word_address (beg_word),
6932                      (size_t)mark_word_address (end_word),
6933                      (size_t)from, (size_t)end,
6934                      (check_only ? "check_only" : "clear")));
6935         if (!check_only)
6936         {
6937             uint8_t* op = from;
6938             while (op < mark_word_address (beg_word))
6939             {
6940                 mark_array_clear_marked (op);
6941                 op += mark_bit_pitch;
6942             }
6943
6944             memset (&mark_array[beg_word], 0, (end_word - beg_word)*sizeof (uint32_t));
6945         }
6946 #ifdef _DEBUG
6947         else
6948         {
6949             //Beware: it is assumed that the mark array word straddling
6950             //start has been cleared beforehand.
6951             //Verify that the array is empty.
6952             size_t  markw = mark_word_of (align_on_mark_word (from));
6953             size_t  markw_end = mark_word_of (align_on_mark_word (end));
6954             while (markw < markw_end)
6955             {
6956                 assert (!(mark_array [markw]));
6957                 markw++;
6958             }
6959             uint8_t* p = mark_word_address (markw_end);
6960             while (p < end)
6961             {
6962                 assert (!(mark_array_marked (p)));
6963                 p++;
6964             }
6965         }
6966 #endif //_DEBUG
6967     }
6968 }
6969 #endif //MARK_ARRAY
6970
6971 //These work on untranslated card tables
6972 inline
6973 uint32_t*& card_table_next (uint32_t* c_table)
6974 {
6975     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table;
6976 }
6977
6978 inline
6979 size_t& card_table_size (uint32_t* c_table)
6980 {
6981     return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size;
6982 }
6983
6984 void own_card_table (uint32_t* c_table)
6985 {
6986     card_table_refcount (c_table) += 1;
6987 }
6988
6989 void destroy_card_table (uint32_t* c_table);
6990
6991 void delete_next_card_table (uint32_t* c_table)
6992 {
6993     uint32_t* n_table = card_table_next (c_table);
6994     if (n_table)
6995     {
6996         if (card_table_next (n_table))
6997         {
6998             delete_next_card_table (n_table);
6999         }
7000         if (card_table_refcount (n_table) == 0)
7001         {
7002             destroy_card_table (n_table);
7003             card_table_next (c_table) = 0;
7004         }
7005     }
7006 }
7007
7008 void release_card_table (uint32_t* c_table)
7009 {
7010     assert (card_table_refcount (c_table) >0);
7011     card_table_refcount (c_table) -= 1;
7012     if (card_table_refcount (c_table) == 0)
7013     {
7014         delete_next_card_table (c_table);
7015         if (card_table_next (c_table) == 0)
7016         {
7017             destroy_card_table (c_table);
7018             // sever the link from the parent
7019             if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
7020             {
7021                 g_gc_card_table = 0;
7022
7023 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7024                 g_gc_card_bundle_table = 0;
7025 #endif
7026 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7027                 SoftwareWriteWatch::StaticClose();
7028 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7029             }
7030             else
7031             {
7032                 uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))];
7033                 if (p_table)
7034                 {
7035                     while (p_table && (card_table_next (p_table) != c_table))
7036                         p_table = card_table_next (p_table);
7037                     card_table_next (p_table) = 0;
7038                 }
7039             }
7040         }
7041     }
7042 }
7043
7044 void destroy_card_table (uint32_t* c_table)
7045 {
7046 //  delete (uint32_t*)&card_table_refcount(c_table);
7047
7048     GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table));
7049     dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table)));
7050 }
7051
7052 uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
7053 {
7054     assert (g_gc_lowest_address == start);
7055     assert (g_gc_highest_address == end);
7056
7057     uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
7058
7059     size_t bs = size_brick_of (start, end);
7060     size_t cs = size_card_of (start, end);
7061 #ifdef MARK_ARRAY
7062     size_t ms = (gc_can_use_concurrent ? 
7063                  size_mark_array_of (start, end) :
7064                  0);
7065 #else
7066     size_t ms = 0;
7067 #endif //MARK_ARRAY
7068
7069     size_t cb = 0;
7070
7071 #ifdef CARD_BUNDLE
7072     if (can_use_write_watch_for_card_table())
7073     {
7074         cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
7075 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7076         // If we're not manually managing the card bundles, we will need to use OS write
7077         // watch APIs over this region to track changes.
7078         virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
7079 #endif
7080     }
7081 #endif //CARD_BUNDLE
7082
7083     size_t wws = 0;
7084 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7085     size_t sw_ww_table_offset = 0;
7086     if (gc_can_use_concurrent)
7087     {
7088         size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
7089         sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
7090         wws = sw_ww_table_offset - sw_ww_size_before_table + SoftwareWriteWatch::GetTableByteSize(start, end);
7091     }
7092 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7093
7094 #ifdef GROWABLE_SEG_MAPPING_TABLE
7095     size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
7096     size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
7097     size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
7098
7099     st += (st_table_offset_aligned - st_table_offset);
7100 #else //GROWABLE_SEG_MAPPING_TABLE
7101     size_t st = 0;
7102 #endif //GROWABLE_SEG_MAPPING_TABLE
7103
7104     // it is impossible for alloc_size to overflow due to bounds on each of
7105     // its components.
7106     size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
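
    // Layout of the single reservation, in order of increasing offset (with
    // alignment padding where noted above):
    //   [card_table_info][card table (cs)][brick table (bs)][card bundles (cb)]
    //   [software write watch (wws)][seg mapping table (st)][mark array (ms)]
    // Everything before the mark array is committed in one call below; the
    // mark array is committed later, per segment.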
7107     uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
7108
7109     if (!mem)
7110         return 0;
7111
7112     dprintf (2, ("Init - Card table alloc for %Id bytes: [%Ix, %Ix[",
7113                  alloc_size, (size_t)mem, (size_t)(mem+alloc_size)));
7114
7115     // mark array will be committed separately (per segment).
7116     size_t commit_size = alloc_size - ms;
7117
7118     if (!GCToOSInterface::VirtualCommit (mem, commit_size))
7119     {
7120         dprintf (2, ("Card table commit failed"));
7121         GCToOSInterface::VirtualRelease (mem, alloc_size);
7122         return 0;
7123     }
7124
7125     // initialize the ref count
7126     uint32_t* ct = (uint32_t*)(mem+sizeof (card_table_info));
7127     card_table_refcount (ct) = 0;
7128     card_table_lowest_address (ct) = start;
7129     card_table_highest_address (ct) = end;
7130     card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
7131     card_table_size (ct) = alloc_size;
7132     card_table_next (ct) = 0;
7133
7134 #ifdef CARD_BUNDLE
7135     card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
7136
7137 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7138     g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address);
7139 #endif
7140
7141 #endif //CARD_BUNDLE
7142
7143 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7144     if (gc_can_use_concurrent)
7145     {
7146         SoftwareWriteWatch::InitializeUntranslatedTable(mem + sw_ww_table_offset, start);
7147     }
7148 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7149
7150 #ifdef GROWABLE_SEG_MAPPING_TABLE
7151     seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
7152     seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table - 
7153                                         size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
7154 #endif //GROWABLE_SEG_MAPPING_TABLE
7155
7156 #ifdef MARK_ARRAY
7157     if (gc_can_use_concurrent)
7158         card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
7159     else
7160         card_table_mark_array (ct) = NULL;
7161 #endif //MARK_ARRAY
7162
7163     return translate_card_table(ct);
7164 }
7165
7166 void gc_heap::set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p)
7167 {
7168 #ifdef MULTIPLE_HEAPS
7169     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
7170     {
7171         gc_heap* hp = gc_heap::g_heaps [hn];
7172         hp->fgm_result.set_fgm (f, s, loh_p);
7173     }
7174 #else //MULTIPLE_HEAPS
7175     fgm_result.set_fgm (f, s, loh_p);
7176 #endif //MULTIPLE_HEAPS
7177 }
7178
7179 //returns 0 for success, -1 otherwise
7180 // We are doing all the decommitting here because we want to make sure we have
7181 // enough memory to do so - if we do this during copy_brick_card_table
7182 // and fail to decommit, it would make the failure case very complicated to
7183 // handle. This way we may waste some decommit if we call this multiple
7184 // times before the next FGC, but it's easier to handle the failure case.
7185 int gc_heap::grow_brick_card_tables (uint8_t* start,
7186                                      uint8_t* end,
7187                                      size_t size,
7188                                      heap_segment* new_seg, 
7189                                      gc_heap* hp, 
7190                                      BOOL loh_p)
7191 {
7192     uint8_t* la = g_gc_lowest_address;
7193     uint8_t* ha = g_gc_highest_address;
7194     uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address);
7195     uint8_t* saved_g_highest_address = max (end, g_gc_highest_address);
7196     seg_mapping* new_seg_mapping_table = nullptr;
7197 #ifdef BACKGROUND_GC
7198     // This value is only for logging purposes - it's not necessarily exactly what we
7199     // would commit for the mark array, but it's close enough for diagnostic purposes.
7200     size_t logging_ma_commit_size = size_mark_array_of (0, (uint8_t*)size);
7201 #endif //BACKGROUND_GC
7202
7203     // See if the address is already covered
7204     if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address))
7205     {
7206         {
7207             //modify the highest address so the span covered
7208             //is twice the previous one.
7209             uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit());
7210             // On non-Windows systems, we get only an approximate value that can possibly be
7211             // slightly lower than the saved_g_highest_address.
7212             // In such case, we set the top to the saved_g_highest_address so that the
7213             // card and brick tables always cover the whole new range.
7214             if (top < saved_g_highest_address)
7215             {
7216                 top = saved_g_highest_address;
7217             }
7218             size_t ps = ha-la;
7219 #ifdef BIT64
7220             if (ps > (uint64_t)200*1024*1024*1024)
7221                 ps += (uint64_t)100*1024*1024*1024;
7222             else
7223 #endif // BIT64
7224                 ps *= 2;
7225
7226             if (saved_g_lowest_address < g_gc_lowest_address)
7227             {
7228                 if (ps > (size_t)g_gc_lowest_address)
7229                     saved_g_lowest_address = (uint8_t*)(size_t)OS_PAGE_SIZE;
7230                 else
7231                 {
7232                     assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE);
7233                     saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps));
7234                 }
7235             }
7236
7237             if (saved_g_highest_address > g_gc_highest_address)
7238             {
7239                 saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address);
7240                 if (saved_g_highest_address > top)
7241                     saved_g_highest_address = top;
7242             }
7243         }
7244         dprintf (GC_TABLE_LOG, ("Growing card table [%Ix, %Ix[",
7245                                 (size_t)saved_g_lowest_address,
7246                                 (size_t)saved_g_highest_address));
7247
7248         bool write_barrier_updated = false;
7249         uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
7250         uint32_t* saved_g_card_table = g_gc_card_table;
7251
7252 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7253         uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table;
7254 #endif
7255
7256         uint32_t* ct = 0;
7257         uint32_t* translated_ct = 0;
7258         short* bt = 0;
7259
7260         size_t cs = size_card_of (saved_g_lowest_address, saved_g_highest_address);
7261         size_t bs = size_brick_of (saved_g_lowest_address, saved_g_highest_address);
7262
7263 #ifdef MARK_ARRAY
7264         size_t ms = (gc_heap::gc_can_use_concurrent ? 
7265                     size_mark_array_of (saved_g_lowest_address, saved_g_highest_address) :
7266                     0);
7267 #else
7268         size_t ms = 0;
7269 #endif //MARK_ARRAY
7270
7271         size_t cb = 0;
7272
7273 #ifdef CARD_BUNDLE
7274         if (can_use_write_watch_for_card_table())
7275         {
7276             cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);
7277
7278 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7279             // If we're not manually managing the card bundles, we will need to use OS write
7280             // watch APIs over this region to track changes.
7281             virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
7282 #endif
7283         }
7284 #endif //CARD_BUNDLE
7285
7286         size_t wws = 0;
7287 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7288         size_t sw_ww_table_offset = 0;
7289         if (gc_can_use_concurrent)
7290         {
7291             size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
7292             sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
7293             wws =
7294                 sw_ww_table_offset -
7295                 sw_ww_size_before_table +
7296                 SoftwareWriteWatch::GetTableByteSize(saved_g_lowest_address, saved_g_highest_address);
7297         }
7298 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7299
7300 #ifdef GROWABLE_SEG_MAPPING_TABLE
7301         size_t st = size_seg_mapping_table_of (saved_g_lowest_address, saved_g_highest_address);
7302         size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
7303         size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
7304         st += (st_table_offset_aligned - st_table_offset);
7305 #else //GROWABLE_SEG_MAPPING_TABLE
7306         size_t st = 0;
7307 #endif //GROWABLE_SEG_MAPPING_TABLE
7308
7309         // it is impossible for alloc_size to overflow due to bounds on each of
7310         // its components.
7311         size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
7312         dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
7313                                   cs, bs, cb, wws, st, ms));
7314
7315         uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
7316
7317         if (!mem)
7318         {
7319             set_fgm_result (fgm_grow_table, alloc_size, loh_p);
7320             goto fail;
7321         }
7322
7323         dprintf (GC_TABLE_LOG, ("Table alloc for %Id bytes: [%Ix, %Ix[",
7324                                  alloc_size, (size_t)mem, (size_t)((uint8_t*)mem+alloc_size)));
7325
7326         {   
7327             // mark array will be committed separately (per segment).
7328             size_t commit_size = alloc_size - ms;
7329
7330             if (!GCToOSInterface::VirtualCommit (mem, commit_size))
7331             {
7332                 dprintf (GC_TABLE_LOG, ("Table commit failed"));
7333                 set_fgm_result (fgm_commit_table, commit_size, loh_p);
7334                 goto fail;
7335             }
7336         }
7337
7338         ct = (uint32_t*)(mem + sizeof (card_table_info));
7339         card_table_refcount (ct) = 0;
7340         card_table_lowest_address (ct) = saved_g_lowest_address;
7341         card_table_highest_address (ct) = saved_g_highest_address;
7342         card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))];
7343
7344         //clear the card table
7345 /*
7346         memclr ((uint8_t*)ct,
7347                 (((saved_g_highest_address - saved_g_lowest_address)*sizeof (uint32_t) /
7348                   (card_size * card_word_width))
7349                  + sizeof (uint32_t)));
7350 */
7351
7352         bt = (short*)((uint8_t*)ct + cs);
7353
7354         // No initialization needed, will be done in copy_brick_card
7355
7356         card_table_brick_table (ct) = bt;
7357
7358 #ifdef CARD_BUNDLE
7359         card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
7360         //set all bundles to look at all of the cards
7361         memset(card_table_card_bundle_table (ct), 0xFF, cb);
7362 #endif //CARD_BUNDLE
7363
7364 #ifdef GROWABLE_SEG_MAPPING_TABLE
7365         {
7366             new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
7367             new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
7368                                               size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
7369             memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
7370                 &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
7371                 size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
7372
7373             // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
7374             // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
7375             // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
7376             // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
7377             // if an OOM occurs.
7378         }
7379 #endif //GROWABLE_SEG_MAPPING_TABLE
7380
7381 #ifdef MARK_ARRAY
7382         if(gc_can_use_concurrent)
7383             card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
7384         else
7385             card_table_mark_array (ct) = NULL;
7386 #endif //MARK_ARRAY
7387
7388         translated_ct = translate_card_table (ct);
7389
7390         dprintf (GC_TABLE_LOG, ("card table: %Ix(translated: %Ix), seg map: %Ix, mark array: %Ix", 
7391             (size_t)ct, (size_t)translated_ct, (size_t)new_seg_mapping_table, (size_t)card_table_mark_array (ct)));
7392
7393 #ifdef BACKGROUND_GC
7394         if (hp->should_commit_mark_array())
7395         {
7396             dprintf (GC_TABLE_LOG, ("new low: %Ix, new high: %Ix, latest mark array is %Ix(translate: %Ix)", 
7397                                     saved_g_lowest_address, saved_g_highest_address,
7398                                     card_table_mark_array (ct),
7399                                     translate_mark_array (card_table_mark_array (ct))));
7400             uint32_t* new_mark_array = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, saved_g_lowest_address));
7401             if (!commit_new_mark_array_global (new_mark_array))
7402             {
7403                 dprintf (GC_TABLE_LOG, ("failed to commit portions in the mark array for existing segments"));
7404                 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7405                 goto fail;
7406             }
7407
7408             if (!commit_mark_array_new_seg (hp, new_seg, translated_ct, saved_g_lowest_address))
7409             {
7410                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg"));
7411                 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7412                 goto fail;
7413             }
7414         }
7415         else
7416         {
7417             clear_commit_flag_global();
7418         }
7419 #endif //BACKGROUND_GC
7420
7421 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7422         if (gc_can_use_concurrent)
7423         {
7424             // The current design of software write watch requires that the runtime is suspended during resize. Suspending
7425             // on resize is preferred because it is a far less frequent operation than GetWriteWatch() / ResetWriteWatch().
7426             // Suspending here allows copying dirty state from the old table into the new table, and avoids having to merge
7427             // old table info lazily as is done for card tables.
7428
7429             // Either this thread was the thread that did the suspension which means we are suspended; or this is called
7430             // from a GC thread which means we are in a blocking GC and also suspended.
7431             bool is_runtime_suspended = GCToEEInterface::IsGCThread();
7432             if (!is_runtime_suspended)
7433             {
7434                 // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the
7435                 // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call.
7436                 // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state
7437                 // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and
7438                 // g_gc_highest_address.
7439                 suspend_EE();
7440             }
7441
7442             g_gc_card_table = translated_ct;
7443
7444 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7445             g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
7446 #endif
7447
7448             SoftwareWriteWatch::SetResizedUntranslatedTable(
7449                 mem + sw_ww_table_offset,
7450                 saved_g_lowest_address,
7451                 saved_g_highest_address);
7452
7453             // Since the runtime is already suspended, update the write barrier here as well.
7454             // This passes a bool telling whether we need to switch to the post
7455             // grow version of the write barrier.  This test tells us if the new
7456             // segment was allocated at a lower address than the old, requiring
7457             // that we start doing an upper bounds check in the write barrier.
7458             g_gc_lowest_address = saved_g_lowest_address;
7459             g_gc_highest_address = saved_g_highest_address;
7460             stomp_write_barrier_resize(true, la != saved_g_lowest_address);
7461             write_barrier_updated = true;
7462
7463             if (!is_runtime_suspended)
7464             {
7465                 restart_EE();
7466             }
7467         }
7468         else
7469 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7470         {
7471             g_gc_card_table = translated_ct;
7472
7473 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7474             g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
7475 #endif
7476         }
7477
7478         seg_mapping_table = new_seg_mapping_table;
7479
7480         GCToOSInterface::FlushProcessWriteBuffers();
7481         g_gc_lowest_address = saved_g_lowest_address;
7482         g_gc_highest_address = saved_g_highest_address;
7483
7484         if (!write_barrier_updated)
7485         {
7486             // This passes a bool telling whether we need to switch to the post
7487             // grow version of the write barrier.  This test tells us if the new
7488             // segment was allocated at a lower address than the old, requiring
7489             // that we start doing an upper bounds check in the write barrier.
7490             // This will also suspend the runtime if the write barrier type needs
7491             // to be changed, so we are doing this after all global state has
7492             // been updated. See the comment above suspend_EE() above for more
7493             // info.
7494             stomp_write_barrier_resize(GCToEEInterface::IsGCThread(), la != saved_g_lowest_address);
7495         }
7496
7497
7498         return 0;
7499         
7500 fail:
7501         //clean up the mess and return -1;
7502
7503         if (mem)
7504         {
7505             assert(g_gc_card_table == saved_g_card_table);
7506
7507 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7508             assert(g_gc_card_bundle_table  == saved_g_card_bundle_table);
7509 #endif
7510
7511             //delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
7512             if (!GCToOSInterface::VirtualRelease (mem, alloc_size))
7513             {
7514                 dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
7515                 assert (!"release failed");
7516             }
7517         }
7518
7519         return -1;
7520     }
7521     else
7522     {
7523 #ifdef BACKGROUND_GC
7524         if (hp->should_commit_mark_array())
7525         {
7526             dprintf (GC_TABLE_LOG, ("in range new seg %Ix, mark_array is %Ix", new_seg, hp->mark_array));
7527             if (!commit_mark_array_new_seg (hp, new_seg))
7528             {
7529                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg in range"));
7530                 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7531                 return -1;
7532             }
7533         }
7534 #endif //BACKGROUND_GC
7535     }
7536
7537     return 0;
7538 }
7539
7540 //copy all of the arrays managed by the card table for a page aligned range
7541 void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
7542                                      short* old_brick_table,
7543                                      heap_segment* seg,
7544                                      uint8_t* start, uint8_t* end)
7545 {
7546     ptrdiff_t brick_offset = brick_of (start) - brick_of (la);
7547
7548
7549     dprintf (2, ("copying tables for range [%Ix %Ix[", (size_t)start, (size_t)end));
7550
7551     // copy brick table
7552     short* brick_start = &brick_table [brick_of (start)];
7553     if (old_brick_table)
7554     {
7555         // segments are always on page boundaries
7556         memcpy (brick_start, &old_brick_table[brick_offset],
7557                 size_brick_of (start, end));
7558
7559     }
7560     else
7561     {
7562         // This is a large heap, just clear the brick table
7563     }
7564
7565     uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
7566 #ifdef MARK_ARRAY
7567 #ifdef BACKGROUND_GC
7568     UNREFERENCED_PARAMETER(seg);
7569     if (recursive_gc_sync::background_running_p())
7570     {
7571         uint32_t* old_mark_array = card_table_mark_array (old_ct);
7572
7573         // We don't need to go through all the card tables here because 
7574         // we only need to copy from the GC version of the mark array - when we
7575         // mark (even in allocate_large_object) we always use that mark array.
7576         if ((card_table_highest_address (old_ct) >= start) &&
7577             (card_table_lowest_address (old_ct) <= end))
7578         {
7579             if ((background_saved_highest_address >= start) &&
7580                 (background_saved_lowest_address <= end))
7581             {
7582                 //copy the mark bits
7583                 // segments are always on page boundaries
7584                 uint8_t* m_start = max (background_saved_lowest_address, start);
7585                 uint8_t* m_end = min (background_saved_highest_address, end);
7586                 memcpy (&mark_array[mark_word_of (m_start)],
7587                         &old_mark_array[mark_word_of (m_start) - mark_word_of (la)],
7588                         size_mark_array_of (m_start, m_end));
7589             }
7590         }
7591         else
7592         {
7593             //only large segments can be out of range
7594             assert (old_brick_table == 0);
7595         }
7596     }
7597 #else //BACKGROUND_GC
7598     assert (seg != 0);
7599     clear_mark_array (start, heap_segment_committed(seg));
7600 #endif //BACKGROUND_GC
7601 #endif //MARK_ARRAY
7602
7603     // n-way merge with all of the card tables ever used in between
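    // (walk the chain of card tables that were in use in between and OR their
    // dirty bits for [start, end[ into the current table, so no cross-generation
    // pointer recorded in an older table is lost)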
7604     uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]);
7605
7606     assert (ct);
7607     while (card_table_next (old_ct) != ct)
7608     {
7609         //copy if old card table contained [start, end[
7610         if ((card_table_highest_address (ct) >= end) &&
7611             (card_table_lowest_address (ct) <= start))
7612         {
7613             // or the card_tables
7614
7615             size_t start_word = card_word (card_of (start));
7616
7617             uint32_t* dest = &card_table[start_word];
7618             uint32_t* src = &((translate_card_table (ct))[start_word]);
7619             ptrdiff_t count = count_card_of (start, end);
7620             for (int x = 0; x < count; x++)
7621             {
7622                 *dest |= *src;
7623
7624 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7625                 if (*src != 0)
7626                 {
7627                     card_bundle_set(cardw_card_bundle(start_word+x));
7628                 }
7629 #endif
7630
7631                 dest++;
7632                 src++;
7633             }
7634         }
7635         ct = card_table_next (ct);
7636     }
7637 }
7638
7639 //initialize all of the arrays managed by the card table for a page aligned range when an existing ro segment becomes in range
7640 void gc_heap::init_brick_card_range (heap_segment* seg)
7641 {
7642     dprintf (2, ("initialising tables for range [%Ix %Ix[",
7643                  (size_t)heap_segment_mem (seg),
7644                  (size_t)heap_segment_allocated (seg)));
7645
7646     // initialize the brick table
7647     for (size_t b = brick_of (heap_segment_mem (seg));
7648          b < brick_of (align_on_brick (heap_segment_allocated (seg)));
7649          b++)
7650     {
7651         set_brick (b, -1);
7652     }
7653
7654 #ifdef MARK_ARRAY
7655     if (recursive_gc_sync::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
7656     {
7657         assert (seg != 0);
7658         clear_mark_array (heap_segment_mem (seg), heap_segment_committed(seg));
7659     }
7660 #endif //MARK_ARRAY
7661
7662     clear_card_for_addresses (heap_segment_mem (seg),
7663                               heap_segment_allocated (seg));
7664 }
7665
7666 void gc_heap::copy_brick_card_table()
7667 {
7668     uint8_t* la = lowest_address;
7669     uint8_t* ha = highest_address;
7670     MAYBE_UNUSED_VAR(ha);
7671     uint32_t* old_card_table = card_table;
7672     short* old_brick_table = brick_table;
7673
7674     assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))]));
7675     assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
7676
7677     /* todo: Need a global lock for this */
7678     uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
7679     own_card_table (ct);
7680     card_table = translate_card_table (ct);
7681     /* End of global lock */
7682     highest_address = card_table_highest_address (ct);
7683     lowest_address = card_table_lowest_address (ct);
7684
7685     brick_table = card_table_brick_table (ct);
7686
7687 #ifdef MARK_ARRAY
7688     if (gc_can_use_concurrent)
7689     {
7690         mark_array = translate_mark_array (card_table_mark_array (ct));
7691         assert (mark_word_of (g_gc_highest_address) ==
7692             mark_word_of (align_on_mark_word (g_gc_highest_address)));
7693     }
7694     else
7695         mark_array = NULL;
7696 #endif //MARK_ARRAY
7697
7698 #ifdef CARD_BUNDLE
7699 #if defined(MARK_ARRAY) && defined(_DEBUG)
7700 #ifdef GROWABLE_SEG_MAPPING_TABLE
7701     size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
7702 #else  //GROWABLE_SEG_MAPPING_TABLE
7703     size_t st = 0;
7704 #endif //GROWABLE_SEG_MAPPING_TABLE
7705 #endif //MARK_ARRAY && _DEBUG
7706     card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
7707
7708     // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
7709     // start of the untranslated table.
7710     assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
7711             card_table_card_bundle_table (ct));
7712
7713     //set the card bundles if we are in a heap growth scenario
7714     if (card_bundles_enabled())
7715     {
7716         card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
7717                           cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
7718     }
7719     //check if we need to turn on card_bundles.
7720 #ifdef MULTIPLE_HEAPS
7721     // use INT64 arithmetic here because of possible overflow on 32-bit
7722     uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*gc_heap::n_heaps;
7723 #else
7724     // use INT64 arithmetic here because of possible overflow on 32-bit
7725     uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
7726 #endif //MULTIPLE_HEAPS
7727     if (reserved_memory >= th)
7728     {
7729         enable_card_bundles();
7730     }
7731
7732 #endif //CARD_BUNDLE
7733
7734     // for each of the segments and heaps, copy the brick table and
7735     // or the card table
7736     heap_segment* seg = generation_start_segment (generation_of (max_generation));
7737     while (seg)
7738     {
7739         if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
7740         {
7741             //check if it became in range
7742             if ((heap_segment_reserved (seg) > lowest_address) &&
7743                 (heap_segment_mem (seg) < highest_address))
7744             {
7745                 set_ro_segment_in_range (seg);
7746             }
7747         }
7748         else
7749         {
7750
7751             uint8_t* end = align_on_page (heap_segment_allocated (seg));
7752             copy_brick_card_range (la, old_card_table,
7753                                    old_brick_table,
7754                                    seg,
7755                                    align_lower_page (heap_segment_mem (seg)),
7756                                    end);
7757         }
7758         seg = heap_segment_next (seg);
7759     }
7760
7761     seg = generation_start_segment (large_object_generation);
7762     while (seg)
7763     {
7764         if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
7765         {
7766             //check if it became in range
7767             if ((heap_segment_reserved (seg) > lowest_address) &&
7768                 (heap_segment_mem (seg) < highest_address))
7769             {
7770                 set_ro_segment_in_range (seg);
7771             }
7772         }
7773         else
7774         {
7775             uint8_t* end = align_on_page (heap_segment_allocated (seg));
7776             copy_brick_card_range (la, old_card_table,
7777                                    0,
7778                                    seg,
7779                                    align_lower_page (heap_segment_mem (seg)),
7780                                    end);
7781         }
7782         seg = heap_segment_next (seg);
7783     }
7784
7785     release_card_table (&old_card_table[card_word (card_of(la))]);
7786 }
7787
7788 #ifdef FEATURE_BASICFREEZE
7789 BOOL gc_heap::insert_ro_segment (heap_segment* seg)
7790 {
7791     enter_spin_lock (&gc_heap::gc_lock);
7792
7793     if (!gc_heap::seg_table->ensure_space_for_insert ()
7794         || (should_commit_mark_array() && !commit_mark_array_new_seg(__this, seg)))
7795     {
7796         leave_spin_lock(&gc_heap::gc_lock);
7797         return FALSE;
7798     }
7799
7800     //insert at the head of the segment list
7801     generation* gen2 = generation_of (max_generation);
7802     heap_segment* oldhead = generation_start_segment (gen2);
7803     heap_segment_next (seg) = oldhead;
7804     generation_start_segment (gen2) = seg;
7805
7806     seg_table->insert (heap_segment_mem(seg), (size_t)seg);
7807
7808 #ifdef SEG_MAPPING_TABLE
7809     seg_mapping_table_add_ro_segment (seg);
7810 #endif //SEG_MAPPING_TABLE
7811
7812     //test if in range
7813     if ((heap_segment_reserved (seg) > lowest_address) &&
7814         (heap_segment_mem (seg) < highest_address))
7815     {
7816         set_ro_segment_in_range (seg);
7817     }
7818
7819     FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_read_only_heap);
7820
7821     leave_spin_lock (&gc_heap::gc_lock);
7822     return TRUE;
7823 }
7824
7825 // No one is calling this function right now. If this is getting called we need
7826 // to take care of decommitting the mark array for it - we will need to remember
7827 // which portion of the mark array was committed and only decommit that.
7828 void gc_heap::remove_ro_segment (heap_segment* seg)
7829 {
7830 //clear the mark bits so a new segment allocated in its place will have clear mark bits
7831 #ifdef MARK_ARRAY
7832     if (gc_can_use_concurrent)
7833     {
7834         clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)),
7835                       align_on_card_word (min (heap_segment_allocated (seg), highest_address)),
7836                       false); // read_only segments need the mark clear
7837     }
7838 #endif //MARK_ARRAY
7839
7840     enter_spin_lock (&gc_heap::gc_lock);
7841
7842     seg_table->remove ((uint8_t*)seg);
7843
7844 #ifdef SEG_MAPPING_TABLE
7845     seg_mapping_table_remove_ro_segment (seg);
7846 #endif //SEG_MAPPING_TABLE
7847
7848     // Locate segment (and previous segment) in the list.
7849     generation* gen2 = generation_of (max_generation);
7850     heap_segment* curr_seg = generation_start_segment (gen2);
7851     heap_segment* prev_seg = NULL;
7852
7853     while (curr_seg && curr_seg != seg)
7854     {
7855         prev_seg = curr_seg;
7856         curr_seg = heap_segment_next (curr_seg);
7857     }
7858     assert (curr_seg == seg);
7859
7860     // Patch previous segment (or list head if there is none) to skip the removed segment.
7861     if (prev_seg)
7862         heap_segment_next (prev_seg) = heap_segment_next (curr_seg);
7863     else
7864         generation_start_segment (gen2) = heap_segment_next (curr_seg);
7865
7866     leave_spin_lock (&gc_heap::gc_lock);
7867 }
7868 #endif //FEATURE_BASICFREEZE
7869
7870 BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg)
7871 {
7872     //set it in range
7873     seg->flags |= heap_segment_flags_inrange;
7874 //    init_brick_card_range (seg);
7875     ro_segments_in_range = TRUE;
7876     //right now, segments aren't protected
7877     //unprotect_segment (seg);
7878     return TRUE;
7879 }
7880
7881 #ifdef MARK_LIST
7882
7883 uint8_t** make_mark_list (size_t size)
7884 {
7885     uint8_t** mark_list = new (nothrow) uint8_t* [size];
7886     return mark_list;
7887 }
7888
7889 #define swap(a,b){uint8_t* t; t = a; a = b; b = t;}
7890
7891 void verify_qsort_array (uint8_t* *low, uint8_t* *high)
7892 {
7893     uint8_t **i = 0;
7894
7895     for (i = low+1; i <= high; i++)
7896     {
7897         if (*i < *(i-1))
7898         {
7899             FATAL_GC_ERROR();
7900         }
7901     }
7902 }
7903
7904 #ifndef USE_INTROSORT
7905 void qsort1( uint8_t* *low, uint8_t* *high, unsigned int depth)
7906 {
7907     if (((low + 16) >= high) || (depth > 100))
7908     {
7909         //insertion sort
7910         uint8_t **i, **j;
7911         for (i = low+1; i <= high; i++)
7912         {
7913             uint8_t* val = *i;
7914             for (j=i;j >low && val<*(j-1);j--)
7915             {
7916                 *j=*(j-1);
7917             }
7918             *j=val;
7919         }
7920     }
7921     else
7922     {
7923         uint8_t *pivot, **left, **right;
7924
7925         //sort low middle and high
7926         if (*(low+((high-low)/2)) < *low)
7927             swap (*(low+((high-low)/2)), *low);
7928         if (*high < *low)
7929             swap (*low, *high);
7930         if (*high < *(low+((high-low)/2)))
7931             swap (*(low+((high-low)/2)), *high);
7932
7933         swap (*(low+((high-low)/2)), *(high-1));
7934         pivot =  *(high-1);
7935         left = low; right = high-1;
7936         while (1) {
7937             while (*(--right) > pivot);
7938             while (*(++left)  < pivot);
7939             if (left < right)
7940             {
7941                 swap(*left, *right);
7942             }
7943             else
7944                 break;
7945         }
7946         swap (*left, *(high-1));
7947         qsort1(low, left-1, depth+1);
7948         qsort1(left+1, high, depth+1);
7949     }
7950 }
7951 #endif //USE_INTROSORT
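     // rqsort1: the same median-of-three quicksort as qsort1 but sorting in descending order
     // (all comparisons are reversed); there is no recursion depth cutoff here.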
7952 void rqsort1( uint8_t* *low, uint8_t* *high)
7953 {
7954     if ((low + 16) >= high)
7955     {
7956         //insertion sort
7957         uint8_t **i, **j;
7958         for (i = low+1; i <= high; i++)
7959         {
7960             uint8_t* val = *i;
7961             for (j=i;j >low && val>*(j-1);j--)
7962             {
7963                 *j=*(j-1);
7964             }
7965             *j=val;
7966         }
7967     }
7968     else
7969     {
7970         uint8_t *pivot, **left, **right;
7971
7972         //sort low middle and high
7973         if (*(low+((high-low)/2)) > *low)
7974             swap (*(low+((high-low)/2)), *low);
7975         if (*high > *low)
7976             swap (*low, *high);
7977         if (*high > *(low+((high-low)/2)))
7978             swap (*(low+((high-low)/2)), *high);
7979
7980         swap (*(low+((high-low)/2)), *(high-1));
7981         pivot =  *(high-1);
7982         left = low; right = high-1;
7983         while (1) {
7984             while (*(--right) < pivot);
7985             while (*(++left)  > pivot);
7986             if (left < right)
7987             {
7988                 swap(*left, *right);
7989             }
7990             else
7991                 break;
7992         }
7993         swap (*left, *(high-1));
7994         rqsort1(low, left-1);
7995         rqsort1(left+1, high);
7996     }
7997 }
7998
7999 #ifdef USE_INTROSORT
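     // introsort: the default sort for the mark list (the _sort macro presumably resolves to
     // introsort::sort when USE_INTROSORT is defined). Quicksort with median-of-three partitioning,
     // switching to heapsort when the depth limit is reached to bound the worst case, and leaving
     // runs shorter than size_threshold to one final insertion sort pass over the whole range.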
8000 class introsort 
8001 {
8002
8003 private: 
8004     static const int size_threshold = 64;
8005     static const int max_depth = 100;
8006
8007
8008 inline static void swap_elements(uint8_t** i,uint8_t** j)
8009     {
8010         uint8_t* t=*i;
8011         *i=*j; 
8012         *j=t;
8013     }
8014
8015 public:
8016     static void sort (uint8_t** begin, uint8_t** end, int ignored)
8017     {
8018         ignored = 0;
8019         introsort_loop (begin, end, max_depth);
8020         insertionsort (begin, end);
8021     }
8022
8023 private: 
8024
8025     static void introsort_loop (uint8_t** lo, uint8_t** hi, int depth_limit)
8026     {
8027         while (hi-lo >= size_threshold)
8028         {
8029             if (depth_limit == 0)
8030             {
8031                 heapsort (lo, hi);
8032                 return;
8033             }
8034             uint8_t** p=median_partition (lo, hi);
8035             depth_limit=depth_limit-1;
8036             introsort_loop (p, hi, depth_limit);
8037             hi=p-1;
8038         }        
8039     }
8040
8041     static uint8_t** median_partition (uint8_t** low, uint8_t** high)
8042     {
8043         uint8_t *pivot, **left, **right;
8044
8045         //sort low middle and high
8046         if (*(low+((high-low)/2)) < *low)
8047             swap_elements ((low+((high-low)/2)), low);
8048         if (*high < *low)
8049             swap_elements (low, high);
8050         if (*high < *(low+((high-low)/2)))
8051             swap_elements ((low+((high-low)/2)), high);
8052
8053         swap_elements ((low+((high-low)/2)), (high-1));
8054         pivot =  *(high-1);
8055         left = low; right = high-1;
8056         while (1) {
8057             while (*(--right) > pivot);
8058             while (*(++left)  < pivot);
8059             if (left < right)
8060             {
8061                 swap_elements(left, right);
8062             }
8063             else
8064                 break;
8065         }
8066         swap_elements (left, (high-1));
8067         return left;
8068     }
8069
8070
8071     static void insertionsort (uint8_t** lo, uint8_t** hi)
8072     {
8073         for (uint8_t** i=lo+1; i <= hi; i++)
8074         {
8075             uint8_t** j = i;
8076             uint8_t* t = *i;
8077             while((j > lo) && (t <*(j-1)))
8078             {
8079                 *j = *(j-1);
8080                 j--;
8081             }
8082             *j = t;
8083         }
8084     }
8085
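         // heapsort: build a max-heap over the hi - lo + 1 elements, then repeatedly move the current
         // maximum to the end of the shrinking range; used as the fallback when quicksort recurses too deep.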
8086     static void heapsort (uint8_t** lo, uint8_t** hi)
8087     { 
8088         size_t n = hi - lo + 1;
8089         for (size_t i=n / 2; i >= 1; i--)
8090         {
8091             downheap (i,n,lo);
8092         }
8093         for (size_t i = n; i > 1; i--)
8094         {
8095             swap_elements (lo, lo + i - 1);
8096             downheap(1, i - 1,  lo);
8097         }
8098     }
8099
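         // downheap: sift the element at 1-based index i down through the binary heap of n elements rooted at lo.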
8100     static void downheap (size_t i, size_t n, uint8_t** lo)
8101     {
8102         uint8_t* d = *(lo + i - 1);
8103         size_t child;
8104         while (i <= n / 2)
8105         {
8106             child = 2*i;
8107             if (child < n && *(lo + child - 1)<(*(lo + child)))
8108             {
8109                 child++;
8110             }
8111             if (!(d<*(lo + child - 1))) 
8112             {
8113                 break;
8114             }
8115             *(lo + i - 1) = *(lo + child - 1);
8116             i = child;
8117         }
8118         *(lo + i - 1) = d;
8119     }
8120
8121 };
8122
8123 #endif //USE_INTROSORT    
8124
8125 #ifdef MULTIPLE_HEAPS
8126 #ifdef PARALLEL_MARK_LIST_SORT
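     // sort_mark_list: each heap sorts its own mark list, then computes, for every heap, the contiguous
     // piece of this sorted list whose entries fall into that heap's ephemeral range. merge_mark_lists
     // running on each heap later merges the pieces destined for it.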
8127 void gc_heap::sort_mark_list()
8128 {
8129     // if this heap had a mark list overflow, we don't do anything
8130     if (mark_list_index > mark_list_end)
8131     {
8132 //        printf("sort_mark_list: overflow on heap %d\n", heap_number);
8133         return;
8134     }
8135
8136     // if any other heap had a mark list overflow, we fake one too,
8137     // so we don't use an incomplete mark list by mistake
8138     for (int i = 0; i < n_heaps; i++)
8139     {
8140         if (g_heaps[i]->mark_list_index > g_heaps[i]->mark_list_end)
8141         {
8142             mark_list_index = mark_list_end + 1;
8143 //            printf("sort_mark_list: overflow on heap %d\n", i);
8144             return;
8145         }
8146     }
8147
8148 //    unsigned long start = GetCycleCount32();
8149
8150     dprintf (3, ("Sorting mark lists"));
8151     if (mark_list_index > mark_list)
8152         _sort (mark_list, mark_list_index - 1, 0);
8153
8154 //    printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list);
8155 //    start = GetCycleCount32();
8156
8157     // first set the pieces for all heaps to empty
8158     int heap_num;
8159     for (heap_num = 0; heap_num < n_heaps; heap_num++)
8160     {
8161         mark_list_piece_start[heap_num] = NULL;
8162         mark_list_piece_end[heap_num] = NULL;
8163     }
8164
8165     uint8_t** x = mark_list;
8166
8167 // predicate means: x is still within the mark list, and within the bounds of this heap
8168 #define predicate(x) (((x) < mark_list_index) && (*(x) < heap->ephemeral_high))
8169
8170     heap_num = -1;
8171     while (x < mark_list_index)
8172     {
8173         gc_heap* heap;
8174         // find the heap x points into - searching cyclically from the last heap,
8175         // because in many cases the right heap is the next one or comes soon after
8176         int last_heap_num = heap_num;
8177         MAYBE_UNUSED_VAR(last_heap_num);
8178         do
8179         {
8180             heap_num++;
8181             if (heap_num >= n_heaps)
8182                 heap_num = 0;
8183             assert(heap_num != last_heap_num); // we should always find the heap - infinite loop if not!
8184             heap = g_heaps[heap_num];
8185         }
8186         while (!(*x >= heap->ephemeral_low && *x < heap->ephemeral_high));
8187
8188         // x is the start of the mark list piece for this heap
8189         mark_list_piece_start[heap_num] = x;
8190
8191         // to find the end of the mark list piece for this heap, find the first x
8192         // that has !predicate(x), i.e. that is either not in this heap, or beyond the end of the list
8193         if (predicate(x))
8194         {
8195             // let's see if we get lucky and the whole rest belongs to this piece
8196             if (predicate(mark_list_index-1))
8197             {
8198                 x = mark_list_index;
8199                 mark_list_piece_end[heap_num] = x;
8200                 break;
8201             }
8202
8203             // we play a variant of binary search to find the point sooner.
8204             // the first loop advances by increasing steps until the predicate turns false.
8205             // then we retreat the last step, and the second loop advances by decreasing steps, keeping the predicate true.
8206             unsigned inc = 1;
8207             do
8208             {
8209                 inc *= 2;
8210                 uint8_t** temp_x = x;
8211                 x += inc;
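                     // if temp_x > x, adding inc wrapped the pointer past the end of the address space; stop advancing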
8212                 if (temp_x > x)
8213                 {
8214                     break;
8215                 }
8216             }
8217             while (predicate(x));
8218             // we know that only the last step was wrong, so we undo it
8219             x -= inc;
8220             do
8221             {
8222                 // loop invariant - predicate holds at x, but not x + inc
8223                 assert (predicate(x) && !(((x + inc) > x) && predicate(x + inc)));
8224                 inc /= 2;
8225                 if (((x + inc) > x) && predicate(x + inc))
8226                 {
8227                     x += inc;
8228                 }
8229             }
8230             while (inc > 1);
8231             // the termination condition and the loop invariant together imply this:
8232             assert(predicate(x) && !predicate(x + inc) && (inc == 1));
8233             // so the spot we're looking for is one further
8234             x += 1;
8235         }
8236         mark_list_piece_end[heap_num] = x;
8237     }
8238
8239 #undef predicate
8240
8241 //    printf("second phase of sort_mark_list for heap %d took %u cycles\n", this->heap_number, GetCycleCount32() - start);
8242 }
8243
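     // append_to_mark_list: copy entries onto the end of this heap's mark list, clamping the count so
     // we never write past mark_list_end; entries that do not fit are simply not appended.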
8244 void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end)
8245 {
8246     size_t slots_needed = end - start;
8247     size_t slots_available = mark_list_end + 1 - mark_list_index;
8248     size_t slots_to_copy = min(slots_needed, slots_available);
8249     memcpy(mark_list_index, start, slots_to_copy*sizeof(*start));
8250     mark_list_index += slots_to_copy;
8251 //    printf("heap %d: appended %Id slots to mark_list\n", heap_number, slots_to_copy);
8252 }
8253
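     // merge_mark_lists: gather the sorted pieces every heap produced for this heap (see sort_mark_list)
     // and merge them into one sorted list. The merge repeatedly picks the source with the lowest current
     // value and copies its run of entries that are <= the second lowest source in a single append.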
8254 void gc_heap::merge_mark_lists()
8255 {
8256     uint8_t** source[MAX_SUPPORTED_CPUS];
8257     uint8_t** source_end[MAX_SUPPORTED_CPUS];
8258     int source_heap[MAX_SUPPORTED_CPUS];
8259     int source_count = 0;
8260
8261     // in case of mark list overflow, don't bother
8262     if (mark_list_index >  mark_list_end)
8263     {
8264 //        printf("merge_mark_lists: overflow\n");
8265         return;
8266     }
8267
8268     dprintf(3, ("merge_mark_lists: heap_number = %d  starts out with %Id entries", heap_number, mark_list_index - mark_list));
8269 //    unsigned long start = GetCycleCount32();
8270     for (int i = 0; i < n_heaps; i++)
8271     {
8272         gc_heap* heap = g_heaps[i];
8273         if (heap->mark_list_piece_start[heap_number] < heap->mark_list_piece_end[heap_number])
8274         {
8275             source[source_count] = heap->mark_list_piece_start[heap_number];
8276             source_end[source_count] = heap->mark_list_piece_end[heap_number];
8277             source_heap[source_count] = i;
8278             if (source_count < MAX_SUPPORTED_CPUS)
8279                 source_count++;
8280         }
8281     }
8282 //    printf("first phase of merge_mark_lists for heap %d took %u cycles\n", heap_number, GetCycleCount32() - start);
8283
8284     dprintf(3, ("heap_number = %d  has %d sources\n", heap_number, source_count));
8285 #if defined(_DEBUG) || defined(TRACE_GC)
8286     for (int j = 0; j < source_count; j++)
8287     {
8288         dprintf(3, ("heap_number = %d  ", heap_number));
8289         dprintf(3, (" source from heap %d = %Ix .. %Ix (%Id entries)",
8290             (size_t)(source_heap[j]), (size_t)(source[j][0]), (size_t)(source_end[j][-1]), (size_t)(source_end[j] - source[j])));
8291        // the sources should all be sorted
8292         for (uint8_t **x = source[j]; x < source_end[j] - 1; x++)
8293         {
8294             if (x[0] > x[1])
8295             {
8296                 dprintf(3, ("oops, mark_list from source %d for heap %d isn't sorted\n", j, heap_number));
8297                 assert (0);
8298             }
8299         }
8300     }
8301 #endif //_DEBUG || TRACE_GC
8302
8303 //    start = GetCycleCount32();
8304
8305     mark_list = &g_mark_list_copy [heap_number*mark_list_size];
8306     mark_list_index = mark_list;
8307     mark_list_end = &mark_list [mark_list_size-1];
8308     int piece_count = 0;
8309     if (source_count == 0)
8310     {
8311         ; // nothing to do
8312     }
8313     else if (source_count == 1)
8314     {
8315         mark_list = source[0];
8316         mark_list_index = source_end[0];
8317         mark_list_end = mark_list_index;
8318         piece_count++;
8319     }
8320     else
8321     {
8322         while (source_count > 1)
8323         {
8324             // find the lowest and second lowest value in the sources we're merging from
8325             int lowest_source = 0;
8326             uint8_t *lowest = *source[0];
8327             uint8_t *second_lowest = *source[1];
8328             for (int i = 1; i < source_count; i++)
8329             {
8330                 if (lowest > *source[i])
8331                 {
8332                     second_lowest = lowest;
8333                     lowest = *source[i];
8334                     lowest_source = i;
8335                 }
8336                 else if (second_lowest > *source[i])
8337                 {
8338                     second_lowest = *source[i];
8339                 }
8340             }
8341
8342             // find the point in the lowest source where it either runs out or is not <= second_lowest anymore
8343
8344             // let's first try to get lucky and see if the whole source is <= second_lowest -- this is actually quite common
8345             uint8_t **x;
8346             if (source_end[lowest_source][-1] <= second_lowest)
8347                 x = source_end[lowest_source];
8348             else
8349             {
8350                 // use linear search to find the end -- could also use binary search as in sort_mark_list,
8351                 // but saw no improvement doing that
8352                 for (x = source[lowest_source]; x < source_end[lowest_source] && *x <= second_lowest; x++)
8353                     ;
8354             }
8355
8356             // blast this piece to the mark list
8357             append_to_mark_list(source[lowest_source], x);
8358             piece_count++;
8359
8360             source[lowest_source] = x;
8361
8362             // check whether this source is now exhausted
8363             if (x >= source_end[lowest_source])
8364             {
8365                 // if it's not the source with the highest index, copy the source with the highest index
8366                 // over it so the non-empty sources are always at the beginning
8367                 if (lowest_source < source_count-1)
8368                 {
8369                     source[lowest_source] = source[source_count-1];
8370                     source_end[lowest_source] = source_end[source_count-1];
8371                 }
8372                 source_count--;
8373             }
8374         }
8375         // we're left with just one source that we copy
8376         append_to_mark_list(source[0], source_end[0]);
8377         piece_count++;
8378     }
8379
8380 //    printf("second phase of merge_mark_lists for heap %d took %u cycles to merge %d pieces\n", heap_number, GetCycleCount32() - start, piece_count);
8381
8382 #if defined(_DEBUG) || defined(TRACE_GC)
8383     // the final mark list must be sorted
8384     for (uint8_t **x = mark_list; x < mark_list_index - 1; x++)
8385     {
8386         if (x[0] > x[1])
8387         {
8388             dprintf(3, ("oops, mark_list for heap %d isn't sorted at the end of merge_mark_lists", heap_number));
8389             assert (0);
8390         }
8391     }
8392 #endif //defined(_DEBUG) || defined(TRACE_GC)
8393 }
8394 #else //PARALLEL_MARK_LIST_SORT
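     // combine_mark_lists: non-parallel variant. Compact all heaps' mark lists into one contiguous run
     // at the front of g_mark_list, sort it once, and point every heap at the combined result.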
8395 void gc_heap::combine_mark_lists()
8396 {
8397     dprintf (3, ("Combining mark lists"));
8398     //verify if a heap has overflowed its mark list
8399     BOOL use_mark_list = TRUE;
8400     for (int i = 0; i < n_heaps; i++)
8401     {
8402         if (g_heaps [i]->mark_list_index >  g_heaps [i]->mark_list_end)
8403         {
8404             use_mark_list = FALSE;
8405             break;
8406         }
8407     }
8408
8409     if (use_mark_list)
8410     {
8411         dprintf (3, ("Using mark list"));
8412         //compact the gaps out of the mark list
8413         int gn = 0;
8414         uint8_t** current_gap = g_heaps [gn]->mark_list_index;
8415         uint8_t** current_gap_end = g_heaps[gn]->mark_list_end + 1;
8416         uint8_t** dst_last = current_gap-1;
8417
8418         int srcn = n_heaps-1;
8419         gc_heap* srch = g_heaps [srcn];
8420         uint8_t** src = srch->mark_list_index - 1;
8421         uint8_t** src_beg = srch->mark_list;
8422
8423         while (current_gap <= src)
8424         {
8425             while ((gn < n_heaps-1) && (current_gap >= current_gap_end))
8426             {
8427                 //go to the next gap
8428                 gn++;
8429                 dprintf (3, ("Going to the next gap %d", gn));
8430                 assert (gn < n_heaps);
8431                 current_gap = g_heaps [gn]->mark_list_index;
8432                 current_gap_end = g_heaps[gn]->mark_list_end + 1;
8433                 assert ((gn == (n_heaps-1)) || (current_gap_end == g_heaps[gn+1]->mark_list));
8434             }
8435             while ((srcn > 0) && (src < src_beg))
8436             {
8437                 //go to the previous source
8438                 srcn--;
8439                 dprintf (3, ("going to the previous source %d", srcn));
8440                 assert (srcn>=0);
8441                 gc_heap* srch = g_heaps [srcn];
8442                 src = srch->mark_list_index - 1;
8443                 src_beg = srch->mark_list;
8444             }
8445             if (current_gap < src)
8446             {
8447                 dst_last = current_gap;
8448                 *current_gap++ = *src--;
8449             }
8450         }
8451         dprintf (3, ("src: %Ix dst_last: %Ix", (size_t)src, (size_t)dst_last));
8452
8453         uint8_t** end_of_list = max (src, dst_last);
8454
8455         //sort the resulting compacted list
8456         assert (end_of_list < &g_mark_list [n_heaps*mark_list_size]);
8457         if (end_of_list > &g_mark_list[0])
8458             _sort (&g_mark_list[0], end_of_list, 0);
8459         //adjust the mark_list to the beginning of the resulting mark list.
8460         for (int i = 0; i < n_heaps; i++)
8461         {
8462             g_heaps [i]->mark_list = g_mark_list;
8463             g_heaps [i]->mark_list_index = end_of_list + 1;
8464             g_heaps [i]->mark_list_end = end_of_list + 1;
8465         }
8466     }
8467     else
8468     {
8469         uint8_t** end_of_list = g_mark_list;
8470         //adjust the mark_list to the beginning of the resulting mark list.
8471         //put the index beyond the end to turn off mark list processing
8472         for (int i = 0; i < n_heaps; i++)
8473         {
8474             g_heaps [i]->mark_list = g_mark_list;
8475             g_heaps [i]->mark_list_index = end_of_list + 1;
8476             g_heaps [i]->mark_list_end = end_of_list;
8477         }
8478     }
8479 }
8480 #endif // PARALLEL_MARK_LIST_SORT
8481 #endif //MULTIPLE_HEAPS
8482 #endif //MARK_LIST
8483
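     // seg_free_spaces records the free spaces found during planning - the gap in front of each pinned
     // plug and the space left at the end of a segment - bucketed by power-of-two size. fit() later
     // services plug relocation requests from the smallest non-empty bucket that is large enough, and
     // move_bucket() re-files the leftover space under its new (smaller) size.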
8484 class seg_free_spaces
8485 {
8486     struct seg_free_space
8487     {
8488         BOOL is_plug;
8489         void* start;
8490     };
8491
8492     struct free_space_bucket
8493     {
8494         seg_free_space* free_space;
8495         ptrdiff_t count_add; // Assigned when we first construct the array.
8496         ptrdiff_t count_fit; // How many items left when we are fitting plugs.
8497     };
8498
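         // A free space that used to live in the old_power2 bucket has shrunk and now belongs in
         // new_power2. The buckets share one contiguous array, so slide each intervening bucket's start
         // pointer up by one slot and shuffle entries so the shrunken space ends up in its new bucket.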
8499     void move_bucket (int old_power2, int new_power2)
8500     {
8501         // PREFAST warning 22015: old_power2 could be negative
8502         assert (old_power2 >= 0);
8503         assert (old_power2 >= new_power2);
8504
8505         if (old_power2 == new_power2)
8506         {
8507             return;
8508         }
8509
8510         seg_free_space* src_index = free_space_buckets[old_power2].free_space;
8511         for (int i = old_power2; i > new_power2; i--)
8512         {
8513             seg_free_space** dest = &(free_space_buckets[i].free_space);
8514             (*dest)++;
8515
8516             seg_free_space* dest_index = free_space_buckets[i - 1].free_space;
8517             if (i > (new_power2 + 1))
8518             {
8519                 seg_free_space temp = *src_index;
8520                 *src_index = *dest_index;
8521                 *dest_index = temp;
8522             }
8523             src_index = dest_index;
8524         }
8525
8526         free_space_buckets[old_power2].count_fit--;
8527         free_space_buckets[new_power2].count_fit++;
8528     }
8529
8530 #ifdef _DEBUG
8531
8532     void dump_free_space (seg_free_space* item)
8533     {
8534         uint8_t* addr = 0;
8535         size_t len = 0;
8536
8537         if (item->is_plug)
8538         {
8539             mark* m = (mark*)(item->start);
8540             len = pinned_len (m);
8541             addr = pinned_plug (m) - len;
8542         }
8543         else
8544         {
8545             heap_segment* seg = (heap_segment*)(item->start);
8546             addr = heap_segment_plan_allocated (seg);
8547             len = heap_segment_committed (seg) - addr;
8548         }
8549
8550         dprintf (SEG_REUSE_LOG_1, ("[%d]0x%Ix %Id", heap_num, addr, len));
8551     }
8552
8553     void dump()
8554     {
8555         seg_free_space* item = NULL;
8556         int i = 0;
8557
8558         dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------\nnow the free spaces look like:", heap_num));
8559         for (i = 0; i < (free_space_bucket_count - 1); i++)
8560         {
8561             dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
8562             dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
8563             item = free_space_buckets[i].free_space;
8564             while (item < free_space_buckets[i + 1].free_space)
8565             {
8566                 dump_free_space (item);
8567                 item++;
8568             }
8569             dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
8570         }
8571
8572         dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
8573         dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
8574         item = free_space_buckets[i].free_space;
8575
8576         while (item <= &seg_free_space_array[free_space_item_count - 1])
8577         {
8578             dump_free_space (item);
8579             item++;
8580         }
8581         dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
8582     }
8583
8584 #endif //_DEBUG
8585
8586     free_space_bucket* free_space_buckets;
8587     seg_free_space* seg_free_space_array;
8588     ptrdiff_t free_space_bucket_count;
8589     ptrdiff_t free_space_item_count;
8590     int base_power2;
8591     int heap_num;
8592 #ifdef _DEBUG
8593     BOOL has_end_of_seg;
8594 #endif //_DEBUG
8595
8596 public:
8597
8598     seg_free_spaces (int h_number)
8599     {
8600         heap_num = h_number;
8601     }
8602
8603     BOOL alloc ()
8604     {
8605         size_t total_prealloc_size = 
8606             MAX_NUM_BUCKETS * sizeof (free_space_bucket) +
8607             MAX_NUM_FREE_SPACES * sizeof (seg_free_space);
8608
8609         free_space_buckets = (free_space_bucket*) new (nothrow) uint8_t[total_prealloc_size];
8610
8611         return (!!free_space_buckets);
8612     }
8613
8614     // We take the ordered free space array we got from the 1st pass,
8615     // and feed the portion that we decided to use to this method, i.e.,
8616     // the largest item_count free spaces.
8617     void add_buckets (int base, size_t* ordered_free_spaces, int bucket_count, size_t item_count)
8618     {
8619         assert (free_space_buckets);
8620         assert (item_count <= (size_t)MAX_PTR);
8621
8622         free_space_bucket_count = bucket_count;
8623         free_space_item_count = item_count;
8624         base_power2 = base;
8625 #ifdef _DEBUG
8626         has_end_of_seg = FALSE;
8627 #endif //_DEBUG
8628
8629         ptrdiff_t total_item_count = 0;
8630         ptrdiff_t i = 0;
8631
8632         seg_free_space_array = (seg_free_space*)(free_space_buckets + free_space_bucket_count);
8633
8634         for (i = 0; i < (ptrdiff_t)item_count; i++)
8635         {
8636             seg_free_space_array[i].start = 0;
8637             seg_free_space_array[i].is_plug = FALSE;
8638         }
8639
8640         for (i = 0; i < bucket_count; i++)
8641         {
8642             free_space_buckets[i].count_add = ordered_free_spaces[i];
8643             free_space_buckets[i].count_fit = ordered_free_spaces[i];
8644             free_space_buckets[i].free_space = &seg_free_space_array[total_item_count];
8645             total_item_count += free_space_buckets[i].count_add;
8646         }
8647
8648         assert (total_item_count == (ptrdiff_t)item_count);
8649     }
8650
8651     // If we are adding a free space before a plug we pass the
8652     // mark stack position so we can update the length; we could
8653     // also be adding the free space after the last plug, in which
8654     // case start is the segment whose heap_segment_plan_allocated
8655     // we'll need to update.
8656     void add (void* start, BOOL plug_p, BOOL first_p)
8657     {
8658         size_t size = (plug_p ? 
8659                        pinned_len ((mark*)start) : 
8660                        (heap_segment_committed ((heap_segment*)start) - 
8661                            heap_segment_plan_allocated ((heap_segment*)start)));
8662         
8663         if (plug_p)
8664         {
8665             dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space before plug: %Id", heap_num, size));
8666         }
8667         else
8668         {
8669             dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space at end of seg: %Id", heap_num, size));
8670 #ifdef _DEBUG
8671             has_end_of_seg = TRUE;
8672 #endif //_DEBUG
8673         }
8674                   
8675         if (first_p)
8676         {
8677             size_t eph_gen_starts = gc_heap::eph_gen_starts_size;
8678             size -= eph_gen_starts;
8679             if (plug_p)
8680             {
8681                 mark* m = (mark*)(start);
8682                 pinned_len (m) -= eph_gen_starts;
8683             }
8684             else
8685             {
8686                 heap_segment* seg = (heap_segment*)start;
8687                 heap_segment_plan_allocated (seg) += eph_gen_starts;
8688             }
8689         }
8690
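             // e.g. a free space of 10,000 bytes rounds down to 8,192 = 2^13, so it lands in the 2^13 bucket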
8691         int bucket_power2 = index_of_set_bit (round_down_power2 (size));
8692         if (bucket_power2 < base_power2)
8693         {
8694             return;
8695         }
8696
8697         free_space_bucket* bucket = &free_space_buckets[bucket_power2 - base_power2];
8698
8699         seg_free_space* bucket_free_space = bucket->free_space;
8700         assert (plug_p || (!plug_p && bucket->count_add));
8701
8702         if (bucket->count_add == 0)
8703         {
8704             dprintf (SEG_REUSE_LOG_1, ("[%d]Already have enough of 2^%d", heap_num, bucket_power2));
8705             return;
8706         }
8707
8708         ptrdiff_t index = bucket->count_add - 1;
8709
8710         dprintf (SEG_REUSE_LOG_1, ("[%d]Building free spaces: adding %Ix; len: %Id (2^%d)", 
8711                     heap_num, 
8712                     (plug_p ? 
8713                         (pinned_plug ((mark*)start) - pinned_len ((mark*)start)) : 
8714                         heap_segment_plan_allocated ((heap_segment*)start)),
8715                     size,
8716                     bucket_power2));
8717
8718         if (plug_p)
8719         {
8720             bucket_free_space[index].is_plug = TRUE;
8721         }
8722
8723         bucket_free_space[index].start = start;
8724         bucket->count_add--;
8725     }
8726
8727 #ifdef _DEBUG
8728
8729     // Do a consistency check after all free spaces are added.
8730     void check()
8731     {
8732         ptrdiff_t i = 0;
8733         int end_of_seg_count = 0;
8734
8735         for (i = 0; i < free_space_item_count; i++)
8736         {
8737             assert (seg_free_space_array[i].start);
8738             if (!(seg_free_space_array[i].is_plug))
8739             {
8740                 end_of_seg_count++;
8741             }
8742         }
8743         
8744         if (has_end_of_seg)
8745         {
8746             assert (end_of_seg_count == 1);
8747         }
8748         else
8749         {
8750             assert (end_of_seg_count == 0);
8751         }
8752
8753         for (i = 0; i < free_space_bucket_count; i++)
8754         {
8755             assert (free_space_buckets[i].count_add == 0);
8756         }
8757     }
8758
8759 #endif //_DEBUG
8760
8761     uint8_t* fit (uint8_t* old_loc,
8762 #ifdef SHORT_PLUGS
8763                BOOL set_padding_on_saved_p,
8764                mark* pinned_plug_entry,
8765 #endif //SHORT_PLUGS
8766                size_t plug_size
8767                REQD_ALIGN_AND_OFFSET_DCL)
8768     {
8769         if (old_loc)
8770         {
8771 #ifdef SHORT_PLUGS
8772             assert (!is_plug_padded (old_loc));
8773 #endif //SHORT_PLUGS
8774             assert (!node_realigned (old_loc));
8775         }
8776
8777         size_t saved_plug_size = plug_size;
8778
8779 #ifdef FEATURE_STRUCTALIGN
8780         // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account
8781         _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false);
8782 #endif // FEATURE_STRUCTALIGN
8783         // TODO: this is also not large alignment ready. We would need to consider alignment when choosing
8784         // the bucket.
8785
8786         size_t plug_size_to_fit = plug_size;
8787
8788         int pad_in_front = (old_loc != 0) ? USE_PADDING_FRONT : 0;
8789
8790 #ifdef SHORT_PLUGS
8791         plug_size_to_fit += (pad_in_front ? Align(min_obj_size) : 0);
8792 #endif //SHORT_PLUGS
8793
8794         int plug_power2 = index_of_set_bit (round_up_power2 (plug_size_to_fit + Align(min_obj_size)));
8795         ptrdiff_t i;
8796         uint8_t* new_address = 0;
8797
8798         if (plug_power2 < base_power2)
8799         {
8800             plug_power2 = base_power2;
8801         }
8802
8803         int chosen_power2 = plug_power2 - base_power2;
8804 retry:
8805         for (i = chosen_power2; i < free_space_bucket_count; i++)
8806         {
8807             if (free_space_buckets[i].count_fit != 0)
8808             {
8809                 break;
8810             }
8811             chosen_power2++;
8812         }
8813
8814         dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting plug len %Id (2^%d) using 2^%d free space", 
8815             heap_num, 
8816             plug_size, 
8817             plug_power2, 
8818             (chosen_power2 + base_power2)));
8819
8820         assert (i < free_space_bucket_count);
8821         
8822         seg_free_space* bucket_free_space = free_space_buckets[chosen_power2].free_space;
8823         ptrdiff_t free_space_count = free_space_buckets[chosen_power2].count_fit;
8824         size_t new_free_space_size = 0;
8825         BOOL can_fit = FALSE;
8826         size_t pad = 0;
8827
8828         for (i = 0; i < free_space_count; i++)
8829         {
8830             size_t free_space_size = 0;
8831             pad = 0;
8832 #ifdef SHORT_PLUGS
8833             BOOL short_plugs_padding_p = FALSE;
8834 #endif //SHORT_PLUGS
8835             BOOL realign_padding_p = FALSE;
8836
8837             if (bucket_free_space[i].is_plug)
8838             {
8839                 mark* m = (mark*)(bucket_free_space[i].start);
8840                 uint8_t* plug_free_space_start = pinned_plug (m) - pinned_len (m);
8841                 
8842 #ifdef SHORT_PLUGS
8843                 if ((pad_in_front & USE_PADDING_FRONT) &&
8844                     (((plug_free_space_start - pin_allocation_context_start_region (m))==0) ||
8845                     ((plug_free_space_start - pin_allocation_context_start_region (m))>=DESIRED_PLUG_LENGTH)))
8846                 {
8847                     pad = Align (min_obj_size);
8848                     short_plugs_padding_p = TRUE;
8849                 }
8850 #endif //SHORT_PLUGS
8851
8852                 if (!((old_loc == 0) || same_large_alignment_p (old_loc, plug_free_space_start+pad)))
8853                 {
8854                     pad += switch_alignment_size (pad != 0);
8855                     realign_padding_p = TRUE;
8856                 }
8857
8858                 plug_size = saved_plug_size + pad;
8859
8860                 free_space_size = pinned_len (m);
8861                 new_address = pinned_plug (m) - pinned_len (m);
8862
8863                 if (free_space_size >= (plug_size + Align (min_obj_size)) ||
8864                     free_space_size == plug_size)
8865                 {
8866                     new_free_space_size = free_space_size - plug_size;
8867                     pinned_len (m) = new_free_space_size;
8868 #ifdef SIMPLE_DPRINTF
8869                     dprintf (SEG_REUSE_LOG_0, ("[%d]FP: 0x%Ix->0x%Ix(%Ix)(%Ix), [0x%Ix (2^%d) -> [0x%Ix (2^%d)",
8870                                 heap_num, 
8871                                 old_loc,
8872                                 new_address, 
8873                                 (plug_size - pad),
8874                                 pad,
8875                                 pinned_plug (m), 
8876                                 index_of_set_bit (round_down_power2 (free_space_size)),
8877                                 (pinned_plug (m) - pinned_len (m)), 
8878                                 index_of_set_bit (round_down_power2 (new_free_space_size))));
8879 #endif //SIMPLE_DPRINTF
8880
8881 #ifdef SHORT_PLUGS
8882                     if (short_plugs_padding_p)
8883                     {
8884                         pin_allocation_context_start_region (m) = plug_free_space_start;
8885                         set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
8886                     }
8887 #endif //SHORT_PLUGS
8888
8889                     if (realign_padding_p)
8890                     {
8891                         set_node_realigned (old_loc);
8892                     }
8893
8894                     can_fit = TRUE;
8895                 }
8896             }
8897             else
8898             {
8899                 heap_segment* seg = (heap_segment*)(bucket_free_space[i].start);
8900                 free_space_size = heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
8901
8902                 if (!((old_loc == 0) || same_large_alignment_p (old_loc, heap_segment_plan_allocated (seg))))
8903                 {
8904                     pad = switch_alignment_size (FALSE);
8905                     realign_padding_p = TRUE;
8906                 }
8907
8908                 plug_size = saved_plug_size + pad;
8909
8910                 if (free_space_size >= (plug_size + Align (min_obj_size)) ||
8911                     free_space_size == plug_size)
8912                 {
8913                     new_address = heap_segment_plan_allocated (seg);
8914                     new_free_space_size = free_space_size - plug_size;
8915                     heap_segment_plan_allocated (seg) = new_address + plug_size;
8916 #ifdef SIMPLE_DPRINTF
8917                     dprintf (SEG_REUSE_LOG_0, ("[%d]FS: 0x%Ix-> 0x%Ix(%Ix) (2^%d) -> 0x%Ix (2^%d)",
8918                                 heap_num, 
8919                                 old_loc,
8920                                 new_address, 
8921                                 (plug_size - pad),
8922                                 index_of_set_bit (round_down_power2 (free_space_size)),
8923                                 heap_segment_plan_allocated (seg), 
8924                                 index_of_set_bit (round_down_power2 (new_free_space_size))));
8925 #endif //SIMPLE_DPRINTF
8926
8927                     if (realign_padding_p)
8928                         set_node_realigned (old_loc);
8929
8930                     can_fit = TRUE;
8931                 }
8932             }
8933
8934             if (can_fit)
8935             {
8936                 break;
8937             }
8938         }
8939
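             // nothing in the chosen bucket fit; per the assert below this can only happen for the base
             // bucket, so retry from the next bucket up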
8940         if (!can_fit)
8941         {
8942             assert (chosen_power2 == 0);
8943             chosen_power2 = 1;
8944             goto retry;
8945         }
8946         else
8947         {
8948             if (pad)
8949             {
8950                 new_address += pad;
8951             }
8952             assert ((chosen_power2 && (i == 0)) ||
8953                     (!chosen_power2) && (i < free_space_count));
8954         }
8955
8956         int new_bucket_power2 = index_of_set_bit (round_down_power2 (new_free_space_size));
8957
8958         if (new_bucket_power2 < base_power2)
8959         {
8960             new_bucket_power2 = base_power2;
8961         }
8962
8963         move_bucket (chosen_power2, new_bucket_power2 - base_power2);
8964
8965         //dump();
8966
8967         return new_address;
8968     }
8969
8970     void cleanup ()
8971     {
8972         if (free_space_buckets)
8973         {
8974             delete [] free_space_buckets;
8975         }
8976         if (seg_free_space_array)
8977         {
8978             delete [] seg_free_space_array;
8979         }
8980     }
8981 };
8982
8983
8984 #define marked(i) header(i)->IsMarked()
8985 #define set_marked(i) header(i)->SetMarked()
8986 #define clear_marked(i) header(i)->ClearMarked()
8987 #define pinned(i) header(i)->IsPinned()
8988 #define set_pinned(i) header(i)->SetPinned()
8989 #define clear_pinned(i) header(i)->GetHeader()->ClrGCBit();
8990
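     // compute the object size the way the runtime does: base size plus, for types with a component
     // size (arrays and strings), the number of components times the component size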
8991 inline size_t my_get_size (Object* ob)
8992 {
8993     MethodTable* mT = header(ob)->GetMethodTable();
8994     return (mT->GetBaseSize() +
8995             (mT->HasComponentSize() ?
8996              ((size_t)((CObjectHeader*)ob)->GetNumComponents() * mT->RawGetComponentSize()) : 0));
8997 }
8998
8999 //#define size(i) header(i)->GetSize()
9000 #define size(i) my_get_size (header(i))
9001
9002 #define contain_pointers(i) header(i)->ContainsPointers()
9003 #ifdef COLLECTIBLE_CLASS
9004 #define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible()
9005
9006 #define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i)
9007 #define is_collectible(i) method_table(i)->Collectible()
9008 #else //COLLECTIBLE_CLASS
9009 #define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
9010 #endif //COLLECTIBLE_CLASS
9011
9012 #if defined (MARK_ARRAY) && defined (BACKGROUND_GC)
9013 inline
9014 void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg)
9015 {
9016     uint8_t* range_beg = 0;
9017     uint8_t* range_end = 0;
9018     if (bgc_mark_array_range (seg, FALSE, &range_beg, &range_end))
9019     {
9020         clear_mark_array (range_beg, align_on_mark_word (range_end), FALSE
9021 #ifdef FEATURE_BASICFREEZE
9022             , TRUE
9023 #endif // FEATURE_BASICFREEZE
9024             );
9025     }
9026 }
9027
9028 void gc_heap::clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
9029 {
9030     if ((start < background_saved_highest_address) &&
9031         (end > background_saved_lowest_address))
9032     {
9033         start = max (start, background_saved_lowest_address);
9034         end = min (end, background_saved_highest_address);
9035
9036         size_t start_mark_bit = mark_bit_of (start);
9037         size_t end_mark_bit = mark_bit_of (end);
9038         unsigned int startbit = mark_bit_bit (start_mark_bit);
9039         unsigned int endbit = mark_bit_bit (end_mark_bit);
9040         size_t startwrd = mark_bit_word (start_mark_bit);
9041         size_t endwrd = mark_bit_word (end_mark_bit);
9042
9043         dprintf (3, ("Clearing all mark array bits between [%Ix:%Ix-[%Ix:%Ix", 
9044             (size_t)start, (size_t)start_mark_bit, 
9045             (size_t)end, (size_t)end_mark_bit));
9046
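             // firstwrd has only the bits below startbit set and lastwrd only the bits at or above endbit,
             // so ANDing with them clears just the requested range in the first and last mark words;
             // whole words in between are zeroed outright.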
9047         unsigned int firstwrd = lowbits (~0, startbit);
9048         unsigned int lastwrd = highbits (~0, endbit);
9049
9050         if (startwrd == endwrd)
9051         {
9052             unsigned int wrd = firstwrd | lastwrd;
9053             mark_array[startwrd] &= wrd;
9054             return;
9055         }
9056
9057         // clear the first mark word.
9058         if (startbit)
9059         {
9060             mark_array[startwrd] &= firstwrd;
9061             startwrd++;
9062         }
9063
9064         for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
9065         {
9066             mark_array[wrdtmp] = 0;
9067         }
9068
9069         // clear the last mark word.
9070         if (endbit)
9071         {
9072             mark_array[endwrd] &= lastwrd;
9073         }
9074     }
9075 }
9076
9077 void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
9078 {
9079     if ((start < background_saved_highest_address) &&
9080         (end > background_saved_lowest_address))
9081     {
9082         start = max (start, background_saved_lowest_address);
9083         end = min (end, background_saved_highest_address);
9084
9085         clear_batch_mark_array_bits (start, end);
9086     }
9087 }
9088
9089 void gc_heap::clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p)
9090 {
9091     dprintf (3, ("clearing mark array bits by objects for addr [%Ix,[%Ix", 
9092                   from, end));
9093     int align_const = get_alignment_constant (!loh_p);
9094
9095     uint8_t* o = from;
9096
9097     while (o < end)
9098     {
9099         uint8_t*  next_o = o + Align (size (o), align_const);
9100
9101         if (background_object_marked (o, TRUE))
9102         {
9103             dprintf (3, ("%Ix was marked by bgc, is now cleared", o));
9104         }
9105
9106         o = next_o;
9107     }
9108 }
9109 #endif //MARK_ARRAY && BACKGROUND_GC
9110
9111 inline
9112 BOOL gc_heap::is_mark_set (uint8_t* o)
9113 {
9114     return marked (o);
9115 }
9116
9117 #if defined (_MSC_VER) && defined (_TARGET_X86_)
9118 #pragma optimize("y", on)        // Small critical routines, don't put in EBP frame 
9119 #endif //_MSC_VER && _TARGET_X86_
9120
9121 // return the generation number of an object.
9122 // It is assumed that the object is valid.
9123 //Note that this will return max_generation for a LOH object
9124 int gc_heap::object_gennum (uint8_t* o)
9125 {
9126     if (in_range_for_segment (o, ephemeral_heap_segment) &&
9127         (o >= generation_allocation_start (generation_of (max_generation-1))))
9128     {
9129         // in an ephemeral generation.
9130         for ( int i = 0; i < max_generation-1; i++)
9131         {
9132             if ((o >= generation_allocation_start (generation_of (i))))
9133                 return i;
9134         }
9135         return max_generation-1;
9136     }
9137     else
9138     {
9139         return max_generation;
9140     }
9141 }
9142
9143 int gc_heap::object_gennum_plan (uint8_t* o)
9144 {
9145     if (in_range_for_segment (o, ephemeral_heap_segment))
9146     {
9147         for (int i = 0; i <= max_generation-1; i++)
9148         {
9149             uint8_t* plan_start = generation_plan_allocation_start (generation_of (i));
9150             if (plan_start && (o >= plan_start))
9151             {
9152                 return i;
9153             }
9154         }
9155     }
9156     return max_generation;
9157 }
9158
9159 #if defined(_MSC_VER) && defined(_TARGET_X86_)
9160 #pragma optimize("", on)        // Go back to command line default optimizations
9161 #endif //_MSC_VER && _TARGET_X86_
9162
9163 heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number)
9164 {
9165     size_t initial_commit = SEGMENT_INITIAL_COMMIT;
9166
9167     //Commit the first page
9168     if (!virtual_alloc_commit_for_heap (new_pages, initial_commit, h_number))
9169     {
9170         return 0;
9171     }
9172
9173     //overlay the heap_segment
9174     heap_segment* new_segment = (heap_segment*)new_pages;
9175
9176     uint8_t* start = new_pages + segment_info_size;
9177     heap_segment_mem (new_segment) = start;
9178     heap_segment_used (new_segment) = start;
9179     heap_segment_reserved (new_segment) = new_pages + size;
9180     heap_segment_committed (new_segment) = new_pages + initial_commit;
9181     init_heap_segment (new_segment);
9182     dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
9183     return new_segment;
9184 }
9185
9186 void gc_heap::init_heap_segment (heap_segment* seg)
9187 {
9188     seg->flags = 0;
9189     heap_segment_next (seg) = 0;
9190     heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
9191     heap_segment_allocated (seg) = heap_segment_mem (seg);
9192 #ifdef BACKGROUND_GC
9193     heap_segment_background_allocated (seg) = 0;
9194     heap_segment_saved_bg_allocated (seg) = 0;
9195 #endif //BACKGROUND_GC
9196 }
9197
9198 //Releases the segment to the OS.
9199 // This is always called on one thread only, so calling seg_table->remove is fine.
9200 void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
9201 {
9202     if (!heap_segment_loh_p (seg))
9203     {
9204         //cleanup the brick table back to the empty value
9205         clear_brick_table (heap_segment_mem (seg), heap_segment_reserved (seg));
9206     }
9207
9208     if (consider_hoarding)
9209     {
9210         assert ((heap_segment_mem (seg) - (uint8_t*)seg) <= ptrdiff_t(2*OS_PAGE_SIZE));
9211         size_t ss = (size_t) (heap_segment_reserved (seg) - (uint8_t*)seg);
9212         //Don't keep the big ones.
9213         if (ss <= INITIAL_ALLOC)
9214         {
9215             dprintf (2, ("Hoarding segment %Ix", (size_t)seg));
9216 #ifdef BACKGROUND_GC
9217             // We don't need to clear the decommitted flag because when this segment is used
9218             // for a new segment the flags will be cleared.
9219             if (!heap_segment_decommitted_p (seg))
9220 #endif //BACKGROUND_GC
9221             {
9222                 decommit_heap_segment (seg);
9223             }
9224
9225 #ifdef SEG_MAPPING_TABLE
9226             seg_mapping_table_remove_segment (seg);
9227 #endif //SEG_MAPPING_TABLE
9228
9229             heap_segment_next (seg) = segment_standby_list;
9230             segment_standby_list = seg;
9231             seg = 0;
9232         }
9233     }
9234
9235     if (seg != 0)
9236     {
9237         dprintf (2, ("h%d: del seg: [%Ix, %Ix[", 
9238                      heap_number, (size_t)seg,
9239                      (size_t)(heap_segment_reserved (seg))));
9240
9241 #ifdef BACKGROUND_GC
9242         ::record_changed_seg ((uint8_t*)seg, heap_segment_reserved (seg), 
9243                             settings.gc_index, current_bgc_state,
9244                             seg_deleted);
9245         decommit_mark_array_by_seg (seg);
9246 #endif //BACKGROUND_GC
9247
9248 #ifdef SEG_MAPPING_TABLE
9249         seg_mapping_table_remove_segment (seg);
9250 #else //SEG_MAPPING_TABLE
9251         seg_table->remove ((uint8_t*)seg);
9252 #endif //SEG_MAPPING_TABLE
9253
9254         release_segment (seg);
9255     }
9256 }
9257
9258 //resets the pages beyond the allocated size so they won't be swapped out and back in
9259
9260 void gc_heap::reset_heap_segment_pages (heap_segment* seg)
9261 {
9262     size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
9263     size_t size = (size_t)heap_segment_committed (seg) - page_start;
9264     if (size != 0)
9265         GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
9266 }
9267
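     //decommits the committed-but-unused tail of a segment, keeping at least 32 pages (or extra_space,
     //whichever is larger) committed beyond the allocated mark; does nothing when the reclaimable tail is small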
9268 void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
9269                                            size_t extra_space)
9270 {
9271     uint8_t*  page_start = align_on_page (heap_segment_allocated(seg));
9272     size_t size = heap_segment_committed (seg) - page_start;
9273     extra_space = align_on_page (extra_space);
9274     if (size >= max ((extra_space + 2*OS_PAGE_SIZE), 100*OS_PAGE_SIZE))
9275     {
9276         page_start += max(extra_space, 32*OS_PAGE_SIZE);
9277         size -= max (extra_space, 32*OS_PAGE_SIZE);
9278
9279         GCToOSInterface::VirtualDecommit (page_start, size);
9280         dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)", 
9281             (size_t)page_start, 
9282             (size_t)(page_start + size),
9283             size));
9284         heap_segment_committed (seg) = page_start;
9285         if (heap_segment_used (seg) > heap_segment_committed (seg))
9286         {
9287             heap_segment_used (seg) = heap_segment_committed (seg);
9288         }
9289     }
9290 }
9291
9292 //decommit all pages except one or 2
9293 void gc_heap::decommit_heap_segment (heap_segment* seg)
9294 {
9295     uint8_t*  page_start = align_on_page (heap_segment_mem (seg));
9296
9297     dprintf (3, ("Decommitting heap segment %Ix", (size_t)seg));
9298
9299 #ifdef BACKGROUND_GC
9300     page_start += OS_PAGE_SIZE;
9301 #endif //BACKGROUND_GC
9302
9303     size_t size = heap_segment_committed (seg) - page_start;
9304     GCToOSInterface::VirtualDecommit (page_start, size);
9305
9306     //re-init the segment object
9307     heap_segment_committed (seg) = page_start;
9308     if (heap_segment_used (seg) > heap_segment_committed (seg))
9309     {
9310         heap_segment_used (seg) = heap_segment_committed (seg);
9311     }
9312 }
9313
9314 void gc_heap::clear_gen0_bricks()
9315 {
9316     if (!gen0_bricks_cleared)
9317     {
9318         gen0_bricks_cleared = TRUE;
9319         //initialize brick table for gen 0
9320         for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
9321                 b < brick_of (align_on_brick
9322                             (heap_segment_allocated (ephemeral_heap_segment)));
9323                 b++)
9324         {
9325             set_brick (b, -1);
9326         }
9327     }
9328 }
9329
9330 #ifdef BACKGROUND_GC
9331 void gc_heap::rearrange_small_heap_segments()
9332 {
9333     heap_segment* seg = freeable_small_heap_segment;
9334     while (seg)
9335     {
9336         heap_segment* next_seg = heap_segment_next (seg);
9337         // TODO: we need to consider hoarding here.
9338         delete_heap_segment (seg, FALSE);
9339         seg = next_seg;
9340     }
9341     freeable_small_heap_segment = 0;
9342 }
9343 #endif //BACKGROUND_GC
9344
9345 void gc_heap::rearrange_large_heap_segments()
9346 {
9347     dprintf (2, ("deleting empty large segments"));
9348     heap_segment* seg = freeable_large_heap_segment;
9349     while (seg)
9350     {
9351         heap_segment* next_seg = heap_segment_next (seg);
9352         delete_heap_segment (seg, GCConfig::GetRetainVM());
9353         seg = next_seg;
9354     }
9355     freeable_large_heap_segment = 0;
9356 }
9357
9358 void gc_heap::rearrange_heap_segments(BOOL compacting)
9359 {
9360     heap_segment* seg =
9361         generation_start_segment (generation_of (max_generation));
9362
9363     heap_segment* prev_seg = 0;
9364     heap_segment* next_seg = 0;
9365     while (seg)
9366     {
9367         next_seg = heap_segment_next (seg);
9368
9369         //link ephemeral segment when expanding
9370         if ((next_seg == 0) && (seg != ephemeral_heap_segment))
9371         {
9372             seg->next = ephemeral_heap_segment;
9373             next_seg = heap_segment_next (seg);
9374         }
9375
9376         //the ephemeral segment was a re-used expanded segment; unlink it from its old position
9377         if ((seg == ephemeral_heap_segment) && next_seg)
9378         {
9379             heap_segment_next (prev_seg) = next_seg;
9380             heap_segment_next (seg) = 0;
9381         }
9382         else
9383         {
9384             uint8_t* end_segment = (compacting ?
9385                                  heap_segment_plan_allocated (seg) : 
9386                                  heap_segment_allocated (seg));
9387             // check if the segment was reached by allocation
9388             if ((end_segment == heap_segment_mem (seg))&&
9389                 !heap_segment_read_only_p (seg))
9390             {
9391                 //if not, unthread and delete
9392                 assert (prev_seg);
9393                 assert (seg != ephemeral_heap_segment);
9394                 heap_segment_next (prev_seg) = next_seg;
9395                 delete_heap_segment (seg, GCConfig::GetRetainVM());
9396
9397                 dprintf (2, ("Deleting heap segment %Ix", (size_t)seg));
9398             }
9399             else
9400             {
9401                 if (!heap_segment_read_only_p (seg))
9402                 {
9403                     if (compacting)
9404                     {
9405                         heap_segment_allocated (seg) =
9406                             heap_segment_plan_allocated (seg);
9407                     }
9408
9409                     // decommit the pages between allocated and committed.
9410                     if (seg != ephemeral_heap_segment)
9411                     {
9412                         decommit_heap_segment_pages (seg, 0);
9413                     }
9414                 }
9415                 prev_seg = seg;
9416             }
9417         }
9418
9419         seg = next_seg;
9420     }
9421 }
9422
9423
9424 #ifdef WRITE_WATCH
9425
9426 uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch
9427
9428 #ifdef TIME_WRITE_WATCH
9429 static unsigned int tot_cycles = 0;
9430 #endif //TIME_WRITE_WATCH
9431
9432 #ifdef CARD_BUNDLE
9433
9434 inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word)
9435 {
9436 #ifdef _DEBUG
9437     for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++)
9438     {
9439         if (!card_bundle_set_p (x))
9440         {
9441             assert (!"Card bundle not set");
9442             dprintf (3, ("Card bundle %Ix not set", x));
9443         }
9444     }
9445 #endif
9446 }
9447
9448 // Verifies that any bundles that are not set represent only cards that are not set.
9449 inline void gc_heap::verify_card_bundles()
9450 {
9451 #ifdef _DEBUG
9452     size_t lowest_card = card_word (card_of (lowest_address));
9453     size_t highest_card = card_word (card_of (highest_address));
9454     size_t cardb = cardw_card_bundle (lowest_card);
9455     size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card));
9456
9457     while (cardb < end_cardb)
9458     {
9459         uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)];
9460         uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)];
9461
9462         if (card_bundle_set_p (cardb) == 0)
9463         {
9464             // Verify that no card is set
9465             while (card_word < card_word_end)
9466             {
9467                 if (*card_word != 0)
9468                 {
9469                     dprintf  (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear",
9470                             dd_collection_count (dynamic_data_of (0)), 
9471                             (size_t)(card_word-&card_table[0]),
9472                             (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb));
9473                 }
9474
9475                 assert((*card_word)==0);
9476                 card_word++;
9477             }
9478         }
9479
9480         cardb++;
9481     }
9482 #endif
9483 }
9484
9485 // If card bundles are enabled, use write watch to find pages in the card table that have 
9486 // been dirtied, and set the corresponding card bundle bits.
9487 void gc_heap::update_card_table_bundle()
9488 {
9489     if (card_bundles_enabled())
9490     {
9491         // The address of the card word containing the card representing the lowest heap address
9492         uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
9493
9494         // The address of the card word containing the card representing the highest heap address
9495         uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
9496         
9497         uint8_t* saved_base_address = base_address;
9498         uintptr_t bcount = array_size;
9499         size_t saved_region_size = align_on_page (high_address) - saved_base_address;
9500
9501         do
9502         {
9503             size_t region_size = align_on_page (high_address) - base_address;
9504
9505             dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
9506             bool success = GCToOSInterface::GetWriteWatch(false /* resetState */,
9507                                                           base_address,
9508                                                           region_size,
9509                                                           (void**)g_addresses,
9510                                                           &bcount);
9511             assert (success && "GetWriteWatch failed!");
9512
9513             dprintf (3,("Found %d pages written", bcount));
9514             for (unsigned i = 0; i < bcount; i++)
9515             {
9516                 // Offset of the dirty page from the start of the card table (clamped to base_address)
9517                 size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
9518
9519                 // Offset of the end of the page from the start of the card table (clamped to high addr)
9520                 size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
9521                 assert (bcardw >= card_word (card_of (g_gc_lowest_address)));
9522
9523                 // Set the card bundle bits representing the dirty card table page
9524                 card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)));
9525                 dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))));
9526
9527                 verify_card_bundle_bits_set(bcardw, ecardw);
9528             }
9529
9530             if (bcount >= array_size)
9531             {
9532                 base_address = g_addresses [array_size-1] + OS_PAGE_SIZE;
9533                 bcount = array_size;
9534             }
9535
9536         } while ((bcount >= array_size) && (base_address < high_address));
9537
9538         // Now that we've updated the card bundle bits, reset the write-tracking state. 
9539         GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
9540     }
9541 }
9542 #endif //CARD_BUNDLE
9543
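// Clears the dirty/write-watch state for the given range: software write watch when
// FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP is defined, otherwise the OS write watch.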
9544 // static
9545 void gc_heap::reset_write_watch_for_gc_heap(void* base_address, size_t region_size)
9546 {
9547 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9548     SoftwareWriteWatch::ClearDirty(base_address, region_size);
9549 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9550     GCToOSInterface::ResetWriteWatch(base_address, region_size);
9551 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9552 }
9553
9554 // static
9555 void gc_heap::get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended)
9556 {
9557 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9558     SoftwareWriteWatch::GetDirty(base_address, region_size, dirty_pages, dirty_page_count_ref, reset, is_runtime_suspended);
9559 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9560     UNREFERENCED_PARAMETER(is_runtime_suspended);
9561     bool success = GCToOSInterface::GetWriteWatch(reset, base_address, region_size, dirty_pages, dirty_page_count_ref);
9562     assert(success);
9563 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9564 }
9565
9566 const size_t ww_reset_quantum = 128*1024*1024;
9567
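// Switches to preemptive mode, sleeps briefly and switches back - used to yield
// between write watch reset quanta so other threads can make progress.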
9568 inline
9569 void gc_heap::switch_one_quantum()
9570 {
9571     enable_preemptive ();
9572     GCToOSInterface::Sleep (1);
9573     disable_preemptive (true);
9574 }
9575
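// Resets write watch over [start_address, start_address + total_reset_size) in
// ww_reset_quantum-sized chunks, yielding after each chunk.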
9576 void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size)
9577 {
9578     size_t reset_size = 0;
9579     size_t remaining_reset_size = 0;
9580     size_t next_reset_size = 0;
9581
9582     while (reset_size != total_reset_size)
9583     {
9584         remaining_reset_size = total_reset_size - reset_size;
9585         next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size);
9586         if (next_reset_size)
9587         {
9588             reset_write_watch_for_gc_heap(start_address, next_reset_size);
9589             reset_size += next_reset_size;
9590
9591             switch_one_quantum();
9592         }
9593     }
9594
9595     assert (reset_size == total_reset_size);
9596 }
9597
9598 // This does a Sleep(1) for every ww_reset_quantum bytes of reset
9599 // we do concurrently.
9600 void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size)
9601 {
9602     if (concurrent_p)
9603     {
9604         *current_total_reset_size += last_reset_size;
9605
9606         dprintf (2, ("reset %Id bytes so far", *current_total_reset_size));
9607
9608         if (*current_total_reset_size > ww_reset_quantum)
9609         {
9610             switch_one_quantum();
9611
9612             *current_total_reset_size = 0;
9613         }
9614     }
9615 }
9616
9617 void gc_heap::reset_write_watch (BOOL concurrent_p)
9618 {
9619 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9620     // Software write watch currently requires the runtime to be suspended during reset. See SoftwareWriteWatch::ClearDirty().
9621     assert(!concurrent_p);
9622 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9623
9624     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
9625
9626     PREFIX_ASSUME(seg != NULL);
9627
9628     size_t reset_size = 0;
9629     size_t region_size = 0;
9630
9631     dprintf (2, ("bgc lowest: %Ix, bgc highest: %Ix", background_saved_lowest_address, background_saved_highest_address));
9632
9633     while (seg)
9634     {
9635         uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
9636         base_address = max (base_address, background_saved_lowest_address);
9637
9638         uint8_t* high_address = 0;
9639         high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
9640         high_address = min (high_address, background_saved_highest_address);
9641         
9642         if (base_address < high_address)
9643         {
9644             region_size = high_address - base_address;
9645
9646 #ifdef TIME_WRITE_WATCH
9647             unsigned int time_start = GetCycleCount32();
9648 #endif //TIME_WRITE_WATCH
9649             dprintf (3, ("h%d: soh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
9650             //reset_ww_by_chunk (base_address, region_size);
9651             reset_write_watch_for_gc_heap(base_address, region_size);
9652
9653 #ifdef TIME_WRITE_WATCH
9654             unsigned int time_stop = GetCycleCount32();
9655             tot_cycles += time_stop - time_start;
9656             printf ("ResetWriteWatch Duration: %d, total: %d\n",
9657                     time_stop - time_start, tot_cycles);
9658 #endif //TIME_WRITE_WATCH
9659
9660             switch_on_reset (concurrent_p, &reset_size, region_size);
9661         }
9662
9663         seg = heap_segment_next_rw (seg);
9664
9665         concurrent_print_time_delta ("CRWW soh");
9666     }
9667
9668     //concurrent_print_time_delta ("CRW soh");
9669
9670     seg = heap_segment_rw (generation_start_segment (large_object_generation));
9671
9672     PREFIX_ASSUME(seg != NULL);
9673
9674     while (seg)
9675     {
9676         uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
9677         uint8_t* high_address =  heap_segment_allocated (seg);
9678
9679         base_address = max (base_address, background_saved_lowest_address);
9680         high_address = min (high_address, background_saved_highest_address);
9681
9682         if (base_address < high_address)
9683         {
9684             region_size = high_address - base_address;
9685             
9686 #ifdef TIME_WRITE_WATCH
9687             unsigned int time_start = GetCycleCount32();
9688 #endif //TIME_WRITE_WATCH
9689             dprintf (3, ("h%d: loh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
9690             //reset_ww_by_chunk (base_address, region_size);
9691             reset_write_watch_for_gc_heap(base_address, region_size);
9692
9693 #ifdef TIME_WRITE_WATCH
9694             unsigned int time_stop = GetCycleCount32();
9695             tot_cycles += time_stop - time_start;
9696             printf ("ResetWriteWatch Duration: %d, total: %d\n",
9697                     time_stop - time_start, tot_cycles);
9698 #endif //TIME_WRITE_WATCH
9699     
9700             switch_on_reset (concurrent_p, &reset_size, region_size);
9701         }
9702
9703         seg = heap_segment_next_rw (seg);
9704
9705         concurrent_print_time_delta ("CRWW loh");
9706     }
9707
9708 #ifdef DEBUG_WRITE_WATCH
9709     debug_write_watch = (uint8_t**)~0;
9710 #endif //DEBUG_WRITE_WATCH
9711 }
9712
9713 #endif //WRITE_WATCH
9714
9715 #ifdef BACKGROUND_GC
9716 void gc_heap::restart_vm()
9717 {
9718     //assert (generation_allocation_pointer (youngest_generation) == 0);
9719     dprintf (3, ("Restarting EE"));
9720     STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n");
9721     ee_proceed_event.Set();
9722 }
9723
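// Fires the BGC allocation wait begin/end event for the given reason unless it is awr_ignored.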
9724 inline
9725 void fire_alloc_wait_event (alloc_wait_reason awr, BOOL begin_p)
9726 {
9727     if (awr != awr_ignored)
9728     {
9729         if (begin_p)
9730         {
9731             FIRE_EVENT(BGCAllocWaitBegin, awr);
9732         }
9733         else
9734         {
9735             FIRE_EVENT(BGCAllocWaitEnd, awr);
9736         }
9737     }
9738 }
9739
9740
9741 void gc_heap::fire_alloc_wait_event_begin (alloc_wait_reason awr)
9742 {
9743     fire_alloc_wait_event (awr, TRUE);
9744 }
9745
9746
9747 void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr)
9748 {
9749     fire_alloc_wait_event (awr, FALSE);
9750 }
9751 #endif //BACKGROUND_GC
9752 void gc_heap::make_generation (generation& gen, heap_segment* seg, uint8_t* start, uint8_t* pointer)
9753 {
9754     gen.allocation_start = start;
9755     gen.allocation_context.alloc_ptr = pointer;
9756     gen.allocation_context.alloc_limit = pointer;
9757     gen.allocation_context.alloc_bytes = 0;
9758     gen.allocation_context.alloc_bytes_loh = 0;
9759     gen.allocation_context_start_region = pointer;
9760     gen.start_segment = seg;
9761     gen.allocation_segment = seg;
9762     gen.plan_allocation_start = 0;
9763     gen.free_list_space = 0;
9764     gen.pinned_allocated = 0; 
9765     gen.free_list_allocated = 0; 
9766     gen.end_seg_allocated = 0;
9767     gen.condemned_allocated = 0; 
9768     gen.free_obj_space = 0;
9769     gen.allocation_size = 0;
9770     gen.pinned_allocation_sweep_size = 0;
9771     gen.pinned_allocation_compact_size = 0;
9772     gen.allocate_end_seg_p = FALSE;
9773     gen.free_list_allocator.clear();
9774
9775 #ifdef FREE_USAGE_STATS
9776     memset (gen.gen_free_spaces, 0, sizeof (gen.gen_free_spaces));
9777     memset (gen.gen_current_pinned_free_spaces, 0, sizeof (gen.gen_current_pinned_free_spaces));
9778     memset (gen.gen_plugs, 0, sizeof (gen.gen_plugs));
9779 #endif //FREE_USAGE_STATS
9780 }
9781
9782 void gc_heap::adjust_ephemeral_limits ()
9783 {
9784     ephemeral_low = generation_allocation_start (generation_of (max_generation - 1));
9785     ephemeral_high = heap_segment_reserved (ephemeral_heap_segment);
9786
9787     dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix",
9788                  (size_t)ephemeral_low, (size_t)ephemeral_high))
9789
9790 #ifndef MULTIPLE_HEAPS
9791     // This updates the write barrier helpers with the new info.
9792     stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high);
9793 #endif // MULTIPLE_HEAPS
9794 }
9795
9796 #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
9797 FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config)
9798 {
9799     FILE* logFile;
9800
9801     if (!temp_logfile_name.Get())
9802     {
9803         return nullptr;
9804     }
9805
9806     char logfile_name[MAX_LONGPATH+1];
9807     uint32_t pid = GCToOSInterface::GetCurrentProcessId();
9808     const char* suffix = is_config ? ".config.log" : ".log";
9809     _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix);
9810     logFile = fopen(logfile_name, "wb");
9811     return logFile;
9812 }
9813 #endif //TRACE_GC || GC_CONFIG_DRIVEN
9814
9815 HRESULT gc_heap::initialize_gc (size_t segment_size,
9816                                 size_t heap_size
9817 #ifdef MULTIPLE_HEAPS
9818                                 ,unsigned number_of_heaps
9819 #endif //MULTIPLE_HEAPS
9820 )
9821 {
9822 #ifdef TRACE_GC
9823     if (GCConfig::GetLogEnabled())
9824     {
9825         gc_log = CreateLogFile(GCConfig::GetLogFile(), false);
9826
9827         if (gc_log == NULL)
9828             return E_FAIL;
9829
9830         // GCLogFileSize in MBs.
9831         gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize());
9832
9833         if (gc_log_file_size <= 0 || gc_log_file_size > 500)
9834         {
9835             fclose (gc_log);
9836             return E_FAIL;
9837         }
9838
9839         gc_log_lock.Initialize();
9840         gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size];
9841         if (!gc_log_buffer)
9842         {
9843             fclose(gc_log);
9844             return E_FAIL;
9845         }
9846
9847         memset (gc_log_buffer, '*', gc_log_buffer_size);
9848
9849         max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size;
9850     }
9851 #endif // TRACE_GC
9852
9853 #ifdef GC_CONFIG_DRIVEN
9854     if (GCConfig::GetConfigLogEnabled())
9855     {
9856         gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true);
9857
9858         if (gc_config_log == NULL)
9859             return E_FAIL;
9860
9861         gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size];
9862         if (!gc_config_log_buffer)
9863         {
9864             fclose(gc_config_log);
9865             return E_FAIL;
9866         }
9867
9868         compact_ratio = static_cast<int>(GCConfig::GetCompactRatio());
9869
9870         //         h#  | GC  | gen | C   | EX   | NF  | BF  | ML  | DM  || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP | 
9871         cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |",
9872                 "h#", // heap index
9873                 "GC", // GC index
9874                 "g", // generation
9875                 "C",  // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not
9876                 "EX", // heap expansion
9877                 "NF", // normal fit
9878                 "BF", // best fit (if it indicates neither NF nor BF it means it had to acquire a new seg.
9879                 "ML", // mark list
9880                 "DM", // demotion
9881                 "PreS", // short object before pinned plug
9882                 "PostS", // short object after pinned plug
9883                 "Merge", // merged pinned plugs
9884                 "Conv", // converted to pinned plug
9885                 "Pre", // plug before pinned plug but not after
9886                 "Post", // plug after pinned plug but not before
9887                 "PrPo", // plug both before and after pinned plug
9888                 "PreP", // pre short object padded
9889                 "PostP" // post short object padded
9890                 ));
9891     }
9892 #endif //GC_CONFIG_DRIVEN
9893
9894 #ifdef GC_STATS
9895     GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
9896     if (logFileName.Get() != nullptr)
9897     {
9898         GCStatistics::logFileName = _strdup(logFileName.Get());
9899         GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
9900         if (!GCStatistics::logFile)
9901         {
9902             return E_FAIL;
9903         }
9904     }
9905 #endif // GC_STATS
9906
9907     HRESULT hres = S_OK;
9908
9909 #ifdef WRITE_WATCH
9910     hardware_write_watch_api_supported();
9911 #ifdef BACKGROUND_GC
9912     if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC())
9913     {
9914         gc_can_use_concurrent = true;
9915 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9916         virtual_alloc_hardware_write_watch = true;
9917 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9918     }
9919     else
9920     {
9921         gc_can_use_concurrent = false;
9922     }
9923 #endif //BACKGROUND_GC
9924 #endif //WRITE_WATCH
9925
9926 #ifdef BACKGROUND_GC
9927     // leave the first page to contain only segment info
9928     // because otherwise we would need to revisit the first page frequently in 
9929     // background GC.
9930     segment_info_size = OS_PAGE_SIZE;
9931 #else
9932     segment_info_size = Align (sizeof (heap_segment), get_alignment_constant (FALSE));
9933 #endif //BACKGROUND_GC
9934
9935     reserved_memory = 0;
9936     unsigned block_count;
9937 #ifdef MULTIPLE_HEAPS
9938     reserved_memory_limit = (segment_size + heap_size) * number_of_heaps;
9939     block_count = number_of_heaps;
9940 #else //MULTIPLE_HEAPS
9941     reserved_memory_limit = segment_size + heap_size;
9942     block_count = 1;
9943 #endif //MULTIPLE_HEAPS
9944
9945     if (!reserve_initial_memory(segment_size,heap_size,block_count))
9946         return E_OUTOFMEMORY;
9947
9948 #ifdef CARD_BUNDLE
9949     //check if we need to turn on card_bundles.
9950 #ifdef MULTIPLE_HEAPS
9951     // use INT64 arithmetic here because of possible overflow on 32p
9952     uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*number_of_heaps;
9953 #else
9954     // use INT64 arithmetic here because of possible overflow on 32p
9955     uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
9956 #endif //MULTIPLE_HEAPS
9957
9958     if (can_use_write_watch_for_card_table() && reserved_memory >= th)
9959     {
9960         settings.card_bundles = TRUE;
9961     }
9962     else
9963     {
9964         settings.card_bundles = FALSE;
9965     }
9966 #endif //CARD_BUNDLE
9967
9968     settings.first_init();
9969
9970     int latency_level_from_config = static_cast<int>(GCConfig::GetLatencyLevel());
9971     if (latency_level_from_config >= latency_level_first && latency_level_from_config <= latency_level_last)
9972     {
9973         gc_heap::latency_level = static_cast<gc_latency_level>(latency_level_from_config);
9974     }
9975
9976     init_static_data();
9977
9978     g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address);
9979
9980     if (!g_gc_card_table)
9981         return E_OUTOFMEMORY;
9982
9983     gc_started = FALSE;
9984
9985 #ifdef MULTIPLE_HEAPS
9986     n_heaps = number_of_heaps;
9987
9988     g_heaps = new (nothrow) gc_heap* [number_of_heaps];
9989     if (!g_heaps)
9990         return E_OUTOFMEMORY;
9991
9992 #ifdef _PREFAST_ 
9993 #pragma warning(push)
9994 #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
9995 #endif // _PREFAST_
9996     g_promoted = new (nothrow) size_t [number_of_heaps*16];
9997     g_bpromoted = new (nothrow) size_t [number_of_heaps*16];
9998 #ifdef MH_SC_MARK
9999     g_mark_stack_busy = new (nothrow) int[(number_of_heaps+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
10000 #endif //MH_SC_MARK
10001 #ifdef _PREFAST_ 
10002 #pragma warning(pop)
10003 #endif // _PREFAST_
10004     if (!g_promoted || !g_bpromoted)
10005         return E_OUTOFMEMORY;
10006
10007 #ifdef MH_SC_MARK
10008     if (!g_mark_stack_busy)
10009         return E_OUTOFMEMORY;
10010 #endif //MH_SC_MARK
10011
10012     if (!create_thread_support (number_of_heaps))
10013         return E_OUTOFMEMORY;
10014
10015     if (!heap_select::init (number_of_heaps))
10016         return E_OUTOFMEMORY;
10017
10018 #endif //MULTIPLE_HEAPS
10019
10020     if (!init_semi_shared())
10021     {
10022         hres = E_FAIL;
10023     }
10024
10025     return hres;
10026 }
10027
10028 //Initializes PER_HEAP_ISOLATED data members.
10029 int
10030 gc_heap::init_semi_shared()
10031 {
10032     int ret = 0;
10033
10034     // This is used for heap expansion - it's to fix exactly the start for gen 0
10035     // through (max_generation-1). When we expand the heap we allocate all these
10036     // gen starts at the beginning of the new ephemeral seg. 
10037     eph_gen_starts_size = (Align (min_obj_size)) * max_generation;
10038
10039 #ifdef MARK_LIST
10040 #ifdef MULTIPLE_HEAPS
10041     mark_list_size = min (150*1024, max (8192, soh_segment_size/(2*10*32)));
10042     g_mark_list = make_mark_list (mark_list_size*n_heaps);
10043
10044     min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2;
10045 #ifdef PARALLEL_MARK_LIST_SORT
10046     g_mark_list_copy = make_mark_list (mark_list_size*n_heaps);
10047     if (!g_mark_list_copy)
10048     {
10049         goto cleanup;
10050     }
10051 #endif //PARALLEL_MARK_LIST_SORT
10052
10053 #else //MULTIPLE_HEAPS
10054
10055     mark_list_size = max (8192, soh_segment_size/(64*32));
10056     g_mark_list = make_mark_list (mark_list_size);
10057
10058 #endif //MULTIPLE_HEAPS
10059
10060     dprintf (3, ("mark_list_size: %d", mark_list_size));
10061
10062     if (!g_mark_list)
10063     {
10064         goto cleanup;
10065     }
10066 #endif //MARK_LIST
10067
10068 #if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
10069     if (!seg_mapping_table_init())
10070         goto cleanup;
10071 #endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
10072
10073 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
10074     seg_table = sorted_table::make_sorted_table();
10075
10076     if (!seg_table)
10077         goto cleanup;
10078 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
10079
10080     segment_standby_list = 0;
10081
10082     if (!full_gc_approach_event.CreateManualEventNoThrow(FALSE))
10083     {
10084         goto cleanup;
10085     }
10086     if (!full_gc_end_event.CreateManualEventNoThrow(FALSE))
10087     {
10088         goto cleanup;
10089     }
10090
10091     fgn_maxgen_percent = 0;
10092     fgn_loh_percent = 0;
10093     full_gc_approach_event_set = false;
10094
10095     memset (full_gc_counts, 0, sizeof (full_gc_counts));
10096
10097     last_gc_index = 0;
10098     should_expand_in_full_gc = FALSE;
10099
10100 #ifdef FEATURE_LOH_COMPACTION
10101     loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0;
10102     loh_compaction_mode = loh_compaction_default;
10103 #endif //FEATURE_LOH_COMPACTION
10104
10105 #ifdef BACKGROUND_GC
10106     memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts));
10107     bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount());
10108     bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin());
10109
10110     {   
10111         int number_bgc_threads = 1;
10112 #ifdef MULTIPLE_HEAPS
10113         number_bgc_threads = n_heaps;
10114 #endif //MULTIPLE_HEAPS
10115         if (!create_bgc_threads_support (number_bgc_threads))
10116         {
10117             goto cleanup;
10118         }
10119     }
10120 #endif //BACKGROUND_GC
10121
10122     memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
10123
10124 #ifdef GC_CONFIG_DRIVEN
10125     compact_or_sweep_gcs[0] = 0;
10126     compact_or_sweep_gcs[1] = 0;
10127 #endif //GC_CONFIG_DRIVEN
10128
10129 #ifdef SHORT_PLUGS
10130     short_plugs_pad_ratio = (double)DESIRED_PLUG_LENGTH / (double)(DESIRED_PLUG_LENGTH - Align (min_obj_size));
10131 #endif //SHORT_PLUGS
10132
10133     ret = 1;
10134
10135 cleanup:
10136
10137     if (!ret)
10138     {
10139         if (full_gc_approach_event.IsValid())
10140         {
10141             full_gc_approach_event.CloseEvent();
10142         }
10143         if (full_gc_end_event.IsValid())
10144         {
10145             full_gc_end_event.CloseEvent();
10146         }
10147     }
10148
10149     return ret;
10150 }
10151
10152 gc_heap* gc_heap::make_gc_heap (
10153 #ifdef MULTIPLE_HEAPS
10154                                 GCHeap* vm_hp,
10155                                 int heap_number
10156 #endif //MULTIPLE_HEAPS
10157                                 )
10158 {
10159     gc_heap* res = 0;
10160
10161 #ifdef MULTIPLE_HEAPS
10162     res = new (nothrow) gc_heap;
10163     if (!res)
10164         return 0;
10165
10166     res->vm_heap = vm_hp;
10167     res->alloc_context_count = 0;
10168
10169 #ifdef MARK_LIST
10170 #ifdef PARALLEL_MARK_LIST_SORT
10171     res->mark_list_piece_start = new (nothrow) uint8_t**[n_heaps];
10172     if (!res->mark_list_piece_start)
10173         return 0;
10174
10175 #ifdef _PREFAST_ 
10176 #pragma warning(push)
10177 #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
10178 #endif // _PREFAST_
10179     res->mark_list_piece_end = new (nothrow) uint8_t**[n_heaps + 32]; // +32 is padding to reduce false sharing
10180 #ifdef _PREFAST_ 
10181 #pragma warning(pop)
10182 #endif // _PREFAST_
10183
10184     if (!res->mark_list_piece_end)
10185         return 0;
10186 #endif //PARALLEL_MARK_LIST_SORT
10187 #endif //MARK_LIST
10188
10189
10190 #endif //MULTIPLE_HEAPS
10191
10192     if (res->init_gc_heap (
10193 #ifdef MULTIPLE_HEAPS
10194         heap_number
10195 #else  //MULTIPLE_HEAPS
10196         0
10197 #endif //MULTIPLE_HEAPS
10198         )==0)
10199     {
10200         return 0;
10201     }
10202
10203 #ifdef MULTIPLE_HEAPS
10204     return res;
10205 #else
10206     return (gc_heap*)1;
10207 #endif //MULTIPLE_HEAPS
10208 }
10209
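// Switches to preemptive mode and waits on a heap's gc_done_event until the GC in
// progress completes; returns the last wait result.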
10210 uint32_t
10211 gc_heap::wait_for_gc_done(int32_t timeOut)
10212 {
10213     bool cooperative_mode = enable_preemptive ();
10214
10215     uint32_t dwWaitResult = NOERROR;
10216
10217     gc_heap* wait_heap = NULL;
10218     while (gc_heap::gc_started)
10219     {       
10220 #ifdef MULTIPLE_HEAPS
10221         wait_heap = GCHeap::GetHeap(heap_select::select_heap(NULL, 0))->pGenGCHeap;
10222         dprintf(2, ("waiting for the gc_done_event on heap %d", wait_heap->heap_number));
10223 #endif // MULTIPLE_HEAPS
10224
10225 #ifdef _PREFAST_
10226         PREFIX_ASSUME(wait_heap != NULL);
10227 #endif // _PREFAST_
10228
10229         dwWaitResult = wait_heap->gc_done_event.Wait(timeOut, FALSE); 
10230     }
10231     disable_preemptive (cooperative_mode);
10232
10233     return dwWaitResult;
10234 }
10235
10236 void 
10237 gc_heap::set_gc_done()
10238 {
10239     enter_gc_done_event_lock();
10240     if (!gc_done_event_set)
10241     {
10242         gc_done_event_set = true;
10243         dprintf (2, ("heap %d: setting gc_done_event", heap_number));
10244         gc_done_event.Set();
10245     }
10246     exit_gc_done_event_lock();
10247 }
10248
10249 void 
10250 gc_heap::reset_gc_done()
10251 {
10252     enter_gc_done_event_lock();
10253     if (gc_done_event_set)
10254     {
10255         gc_done_event_set = false;
10256         dprintf (2, ("heap %d: resetting gc_done_event", heap_number));
10257         gc_done_event.Reset();
10258     }
10259     exit_gc_done_event_lock();
10260 }
10261
10262 void 
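// Acquires gc_done_event_lock with a CAS-based spin lock (-1 means free): spins with
// YieldProcessor on multiprocessor machines and yields the thread otherwise.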
10263 gc_heap::enter_gc_done_event_lock()
10264 {
10265     uint32_t dwSwitchCount = 0;
10266 retry:
10267
10268     if (Interlocked::CompareExchange(&gc_done_event_lock, 0, -1) >= 0)
10269     {
10270         while (gc_done_event_lock >= 0)
10271         {
10272             if  (g_num_processors > 1)
10273             {
10274                 int spin_count = 32 * g_num_processors;
10275                 for (int j = 0; j < spin_count; j++)
10276                 {
10277                     if  (gc_done_event_lock < 0)
10278                         break;
10279                     YieldProcessor();           // indicate to the processor that we are spinning
10280                 }
10281                 if  (gc_done_event_lock >= 0)
10282                     GCToOSInterface::YieldThread(++dwSwitchCount);
10283             }
10284             else
10285                 GCToOSInterface::YieldThread(++dwSwitchCount);
10286         }
10287         goto retry;
10288     }
10289 }
10290
10291 void 
10292 gc_heap::exit_gc_done_event_lock()
10293 {
10294     gc_done_event_lock = -1;
10295 }
10296
10297 #ifndef MULTIPLE_HEAPS
10298
10299 #ifdef RECORD_LOH_STATE
10300 int gc_heap::loh_state_index = 0;
10301 gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states];
10302 #endif //RECORD_LOH_STATE
10303
10304 VOLATILE(int32_t) gc_heap::gc_done_event_lock;
10305 VOLATILE(bool) gc_heap::gc_done_event_set;
10306 GCEvent gc_heap::gc_done_event;
10307 #endif //!MULTIPLE_HEAPS
10308 VOLATILE(bool) gc_heap::internal_gc_done;
10309
10310 void gc_heap::add_saved_spinlock_info (
10311             msl_enter_state enter_state, 
10312             msl_take_state take_state)
10313
10314 {
10315 #ifdef SPINLOCK_HISTORY
10316     spinlock_info* current = &last_spinlock_info[spinlock_info_index];
10317
10318     current->enter_state = enter_state;
10319     current->take_state = take_state;
10320     current->thread_id.SetToCurrentThread();
10321
10322     spinlock_info_index++;
10323
10324     assert (spinlock_info_index <= max_saved_spinlock_info);
10325
10326     if (spinlock_info_index >= max_saved_spinlock_info)
10327     {
10328         spinlock_info_index = 0;
10329     }
10330 #else
10331     MAYBE_UNUSED_VAR(enter_state);
10332     MAYBE_UNUSED_VAR(take_state);
10333 #endif //SPINLOCK_HISTORY
10334 }
10335
10336 int
10337 gc_heap::init_gc_heap (int  h_number)
10338 {
10339 #ifdef MULTIPLE_HEAPS
10340
10341     time_bgc_last = 0;
10342
10343 #ifdef SPINLOCK_HISTORY
10344     spinlock_info_index = 0;
10345     memset (last_spinlock_info, 0, sizeof(last_spinlock_info));
10346 #endif //SPINLOCK_HISTORY
10347
10348     // initialize per heap members.
10349     ephemeral_low = (uint8_t*)1;
10350
10351     ephemeral_high = MAX_PTR;
10352
10353     ephemeral_heap_segment = 0;
10354
10355     freeable_large_heap_segment = 0;
10356
10357     condemned_generation_num = 0;
10358
10359     blocking_collection = FALSE;
10360
10361     generation_skip_ratio = 100;
10362
10363     mark_stack_tos = 0;
10364
10365     mark_stack_bos = 0;
10366
10367     mark_stack_array_length = 0;
10368
10369     mark_stack_array = 0;
10370
10371     verify_pinned_queue_p = FALSE;
10372
10373     loh_pinned_queue_tos = 0;
10374
10375     loh_pinned_queue_bos = 0;
10376
10377     loh_pinned_queue_length = 0;
10378
10379     loh_pinned_queue_decay = LOH_PIN_DECAY;
10380
10381     loh_pinned_queue = 0;
10382
10383     min_overflow_address = MAX_PTR;
10384
10385     max_overflow_address = 0;
10386
10387     gen0_bricks_cleared = FALSE;
10388
10389     gen0_must_clear_bricks = 0;
10390
10391     allocation_quantum = CLR_SIZE;
10392
10393     more_space_lock = gc_lock;
10394
10395     ro_segments_in_range = FALSE;
10396
10397     loh_alloc_since_cg = 0;
10398
10399     new_heap_segment = NULL;
10400
10401 #ifdef RECORD_LOH_STATE
10402     loh_state_index = 0;
10403 #endif //RECORD_LOH_STATE
10404 #endif //MULTIPLE_HEAPS
10405
10406 #ifdef MULTIPLE_HEAPS
10407     if (h_number > n_heaps)
10408     {
10409         assert (!"Number of heaps exceeded");
10410         return 0;
10411     }
10412
10413     heap_number = h_number;
10414 #endif //MULTIPLE_HEAPS
10415
10416     memset (&oom_info, 0, sizeof (oom_info));
10417     memset (&fgm_result, 0, sizeof (fgm_result));
10418     if (!gc_done_event.CreateManualEventNoThrow(FALSE))
10419     {
10420         return 0;
10421     }
10422     gc_done_event_lock = -1;
10423     gc_done_event_set = false;
10424
10425 #ifndef SEG_MAPPING_TABLE
10426     if (!gc_heap::seg_table->ensure_space_for_insert ())
10427     {
10428         return 0;
10429     }
10430 #endif //!SEG_MAPPING_TABLE
10431
10432     heap_segment* seg = get_initial_segment (soh_segment_size, h_number);
10433     if (!seg)
10434         return 0;
10435
10436     FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg),
10437                               (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
10438                               gc_etw_segment_small_object_heap);
10439     
10440 #ifdef SEG_MAPPING_TABLE
10441     seg_mapping_table_add_segment (seg, __this);
10442 #else //SEG_MAPPING_TABLE
10443     seg_table->insert ((uint8_t*)seg, sdelta);
10444 #endif //SEG_MAPPING_TABLE
10445
10446 #ifdef MULTIPLE_HEAPS
10447     heap_segment_heap (seg) = this;
10448 #endif //MULTIPLE_HEAPS
10449
10450     /* todo: Need a global lock for this */
10451     uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))];
10452     own_card_table (ct);
10453     card_table = translate_card_table (ct);
10454     /* End of global lock */
10455
10456     brick_table = card_table_brick_table (ct);
10457     highest_address = card_table_highest_address (ct);
10458     lowest_address = card_table_lowest_address (ct);
10459
10460 #ifdef CARD_BUNDLE
10461     card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
10462     assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
10463             card_table_card_bundle_table (ct));
10464 #endif //CARD_BUNDLE
10465
10466 #ifdef MARK_ARRAY
10467     if (gc_can_use_concurrent)
10468         mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
10469     else
10470         mark_array = NULL;
10471 #endif //MARK_ARRAY
10472
10473     uint8_t*  start = heap_segment_mem (seg);
10474
10475     for (int i = 0; i < 1 + max_generation; i++)
10476     {
10477         make_generation (generation_table [ (max_generation - i) ],
10478                          seg, start, 0);
10479         generation_table [(max_generation - i)].gen_num = max_generation - i;
10480         start += Align (min_obj_size);
10481     }
10482
10483     heap_segment_allocated (seg) = start;
10484     alloc_allocated = start;
10485     heap_segment_used (seg) = start - plug_skew;
10486
10487     ephemeral_heap_segment = seg;
10488
10489 #ifndef SEG_MAPPING_TABLE
10490     if (!gc_heap::seg_table->ensure_space_for_insert ())
10491     {
10492         return 0;
10493     }
10494 #endif //!SEG_MAPPING_TABLE
10495     //Create the large segment generation
10496     heap_segment* lseg = get_initial_segment(min_loh_segment_size, h_number);
10497     if (!lseg)
10498         return 0;
10499     lseg->flags |= heap_segment_flags_loh;
10500
10501     FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(lseg),
10502                               (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
10503                               gc_etw_segment_large_object_heap);
10504
10505 #ifdef SEG_MAPPING_TABLE
10506     seg_mapping_table_add_segment (lseg, __this);
10507 #else //SEG_MAPPING_TABLE
10508     seg_table->insert ((uint8_t*)lseg, sdelta);
10509 #endif //SEG_MAPPING_TABLE
10510
10511     generation_table [max_generation].free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST, gen2_alloc_list);
10512     //assign the alloc_list for the large generation 
10513     generation_table [max_generation+1].free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST, loh_alloc_list);
10514     generation_table [max_generation+1].gen_num = max_generation+1;
10515     make_generation (generation_table [max_generation+1],lseg, heap_segment_mem (lseg), 0);
10516     heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
10517     heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;
10518
10519     for (int gen_num = 0; gen_num <= 1 + max_generation; gen_num++)
10520     {
10521         generation*  gen = generation_of (gen_num);
10522         make_unused_array (generation_allocation_start (gen), Align (min_obj_size));
10523     }
10524
10525 #ifdef MULTIPLE_HEAPS
10526     heap_segment_heap (lseg) = this;
10527
10528     //initialize the alloc context heap
10529     generation_alloc_context (generation_of (0))->set_alloc_heap(vm_heap);
10530
10531     //initialize the alloc context heap
10532     generation_alloc_context (generation_of (max_generation+1))->set_alloc_heap(vm_heap);
10533
10534 #endif //MULTIPLE_HEAPS
10535
10536     //Do this only once
10537 #ifdef MULTIPLE_HEAPS
10538     if (h_number == 0)
10539 #endif //MULTIPLE_HEAPS
10540     {
10541 #ifndef INTERIOR_POINTERS
10542         //set the brick_table for large objects
10543         //but default value is cleared
10544         //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
10545         //                   (uint8_t*)heap_segment_reserved (lseg));
10546
10547 #else //INTERIOR_POINTERS
10548
10549         //Because of the interior pointer business, we have to clear
10550         //the whole brick table
10551         //but the default value is cleared
10552         // clear_brick_table (lowest_address, highest_address);
10553 #endif //INTERIOR_POINTERS
10554     }
10555
10556     if (!init_dynamic_data())
10557     {
10558         return 0;
10559     }
10560
10561     etw_allocation_running_amount[0] = 0;
10562     etw_allocation_running_amount[1] = 0;
10563
10564     //needs to be done after the dynamic data has been initialized
10565 #ifndef MULTIPLE_HEAPS
10566     allocation_running_amount = dd_min_size (dynamic_data_of (0));
10567 #endif //!MULTIPLE_HEAPS
10568
10569     fgn_last_alloc = dd_min_size (dynamic_data_of (0));
10570
10571     mark* arr = new (nothrow) (mark [MARK_STACK_INITIAL_LENGTH]);
10572     if (!arr)
10573         return 0;
10574
10575     make_mark_stack(arr);
10576
10577 #ifdef BACKGROUND_GC
10578     freeable_small_heap_segment = 0;
10579     gchist_index_per_heap = 0;
10580     uint8_t** b_arr = new (nothrow) (uint8_t* [MARK_STACK_INITIAL_LENGTH]);
10581     if (!b_arr)
10582         return 0;
10583
10584     make_background_mark_stack (b_arr);
10585 #endif //BACKGROUND_GC
10586
10587     ephemeral_low = generation_allocation_start(generation_of(max_generation - 1));
10588     ephemeral_high = heap_segment_reserved(ephemeral_heap_segment);
10589     if (heap_number == 0)
10590     {
10591         stomp_write_barrier_initialize(
10592 #ifdef MULTIPLE_HEAPS
10593             reinterpret_cast<uint8_t*>(1), reinterpret_cast<uint8_t*>(~0)
10594 #else
10595             ephemeral_low, ephemeral_high
10596 #endif //!MULTIPLE_HEAPS
10597         );
10598     }
10599
10600 #ifdef MARK_ARRAY
10601     // why would we clear the mark array for this page? it should be cleared..
10602     // clear the first committed page
10603     //if(gc_can_use_concurrent)
10604     //{
10605     //    clear_mark_array (align_lower_page (heap_segment_mem (seg)), heap_segment_committed (seg));
10606     //}
10607 #endif //MARK_ARRAY
10608
10609 #ifdef MULTIPLE_HEAPS
10610     //register the heap in the heaps array
10611
10612     if (!create_gc_thread ())
10613         return 0;
10614
10615     g_heaps [heap_number] = this;
10616
10617 #endif //MULTIPLE_HEAPS
10618
10619 #ifdef FEATURE_PREMORTEM_FINALIZATION
10620     HRESULT hr = AllocateCFinalize(&finalize_queue);
10621     if (FAILED(hr))
10622         return 0;
10623 #endif // FEATURE_PREMORTEM_FINALIZATION
10624
10625     max_free_space_items = MAX_NUM_FREE_SPACES;
10626
10627     bestfit_seg = new (nothrow) seg_free_spaces (heap_number);
10628
10629     if (!bestfit_seg)
10630     {
10631         return 0;
10632     }
10633
10634     if (!bestfit_seg->alloc())
10635     {
10636         return 0;
10637     }
10638
10639     last_gc_before_oom = FALSE;
10640
10641 #ifdef MULTIPLE_HEAPS
10642
10643 #ifdef HEAP_ANALYZE
10644
10645     heap_analyze_success = TRUE;
10646
10647     internal_root_array  = 0;
10648
10649     internal_root_array_index = 0;
10650
10651     internal_root_array_length = initial_internal_roots;
10652
10653     current_obj          = 0;
10654
10655     current_obj_size     = 0;
10656
10657 #endif //HEAP_ANALYZE
10658
10659 #endif // MULTIPLE_HEAPS
10660
10661 #ifdef BACKGROUND_GC
10662     bgc_thread_id.Clear();
10663
10664     if (!create_bgc_thread_support())
10665     {
10666         return 0;
10667     }
10668
10669     bgc_alloc_lock = new (nothrow) exclusive_sync;
10670     if (!bgc_alloc_lock)
10671     {
10672         return 0;
10673     }
10674
10675     bgc_alloc_lock->init();
10676
10677     if (h_number == 0)
10678     {
10679         if (!recursive_gc_sync::init())
10680             return 0;
10681     }
10682
10683     bgc_thread_running = 0;
10684     bgc_thread = 0;
10685     bgc_threads_timeout_cs.Initialize();
10686     expanded_in_fgc = 0;
10687     current_bgc_state = bgc_not_in_process;
10688     background_soh_alloc_count = 0;
10689     background_loh_alloc_count = 0;
10690     bgc_overflow_count = 0;
10691     end_loh_size = dd_min_size (dynamic_data_of (max_generation + 1));
10692 #endif //BACKGROUND_GC
10693
10694 #ifdef GC_CONFIG_DRIVEN
10695     memset (interesting_data_per_heap, 0, sizeof (interesting_data_per_heap));
10696     memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap));
10697     memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap));
10698     memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap));
10699 #endif //GC_CONFIG_DRIVEN
10700
10701     return 1;
10702 }
10703
10704 void
10705 gc_heap::destroy_semi_shared()
10706 {
10707 //TODO: will need to move this to per heap
10708 //#ifdef BACKGROUND_GC
10709 //    if (c_mark_list)
10710 //        delete c_mark_list;
10711 //#endif //BACKGROUND_GC
10712
10713 #ifdef MARK_LIST
10714     if (g_mark_list)
10715         delete g_mark_list;
10716 #endif //MARK_LIST
10717
10718 #if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
10719     if (seg_mapping_table)
10720         delete seg_mapping_table;
10721 #endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
10722
10723 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
10724     //destroy the segment map
10725     seg_table->delete_sorted_table();
10726 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
10727 }
10728
10729 void
10730 gc_heap::self_destroy()
10731 {
10732 #ifdef BACKGROUND_GC
10733     kill_gc_thread();
10734 #endif //BACKGROUND_GC
10735
10736     if (gc_done_event.IsValid())
10737     {
10738         gc_done_event.CloseEvent();
10739     }
10740
10741     // destroy every segment.
10742     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
10743
10744     PREFIX_ASSUME(seg != NULL);
10745
10746     heap_segment* next_seg;
10747     while (seg)
10748     {
10749         next_seg = heap_segment_next_rw (seg);
10750         delete_heap_segment (seg);
10751         seg = next_seg;
10752     }
10753
10754     seg = heap_segment_rw (generation_start_segment (generation_of (max_generation+1)));
10755
10756     PREFIX_ASSUME(seg != NULL);
10757
10758     while (seg)
10759     {
10760         next_seg = heap_segment_next_rw (seg);
10761         delete_heap_segment (seg);
10762         seg = next_seg;
10763     }
10764
10765     // get rid of the card table
10766     release_card_table (card_table);
10767
10768     // destroy the mark stack
10769     delete mark_stack_array;
10770
10771 #ifdef FEATURE_PREMORTEM_FINALIZATION
10772     if (finalize_queue)
10773         delete finalize_queue;
10774 #endif // FEATURE_PREMORTEM_FINALIZATION
10775 }
10776
10777 void
10778 gc_heap::destroy_gc_heap(gc_heap* heap)
10779 {
10780     heap->self_destroy();
10781     delete heap;
10782 }
10783
10784 // Destroys resources owned by gc. It is assumed that a last GC has been performed and that
10785 // the finalizer queue has been drained.
10786 void gc_heap::shutdown_gc()
10787 {
10788     destroy_semi_shared();
10789
10790 #ifdef MULTIPLE_HEAPS
10791     //delete the heaps array
10792     delete g_heaps;
10793     destroy_thread_support();
10794     n_heaps = 0;
10795 #endif //MULTIPLE_HEAPS
10796     //destroy seg_manager
10797
10798     destroy_initial_memory();
10799
10800     GCToOSInterface::Shutdown();
10801 }
10802
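// Returns TRUE if an object of 'size' bytes fits in [alloc_pointer, alloc_limit),
// accounting for any front/tail padding and alignment switches requested via use_padding.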
10803 inline
10804 BOOL gc_heap::size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
10805                           uint8_t* old_loc, int use_padding)
10806 {
10807     BOOL already_padded = FALSE;
10808 #ifdef SHORT_PLUGS
10809     if ((old_loc != 0) && (use_padding & USE_PADDING_FRONT))
10810     {
10811         alloc_pointer = alloc_pointer + Align (min_obj_size);
10812         already_padded = TRUE;
10813     }
10814 #endif //SHORT_PLUGS
10815
10816     if (!((old_loc == 0) || same_large_alignment_p (old_loc, alloc_pointer)))
10817         size = size + switch_alignment_size (already_padded);
10818
10819 #ifdef FEATURE_STRUCTALIGN
10820     alloc_pointer = StructAlign(alloc_pointer, requiredAlignment, alignmentOffset);
10821 #endif // FEATURE_STRUCTALIGN
10822
10823     // in allocate_in_condemned_generation we can have this when we
10824     // set the alloc_limit to plan_allocated which could be less than 
10825     // alloc_ptr
10826     if (alloc_limit < alloc_pointer)
10827     {
10828         return FALSE;
10829     }
10830
10831     if (old_loc != 0)
10832     {
10833         return (((size_t)(alloc_limit - alloc_pointer) >= (size + ((use_padding & USE_PADDING_TAIL)? Align(min_obj_size) : 0))) 
10834 #ifdef SHORT_PLUGS
10835                 ||((!(use_padding & USE_PADDING_FRONT)) && ((alloc_pointer + size) == alloc_limit))
10836 #else //SHORT_PLUGS
10837                 ||((alloc_pointer + size) == alloc_limit)
10838 #endif //SHORT_PLUGS
10839             );
10840     }
10841     else
10842     {
10843         assert (size == Align (min_obj_size));
10844         return ((size_t)(alloc_limit - alloc_pointer) >= size);
10845     }
10846 }
10847
10848 inline
10849 BOOL gc_heap::a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
10850                             int align_const)
10851 {
10852     // We could have run into cases where this is true when alloc_allocated is
10853     // the same as the seg committed.
10854     if (alloc_limit < alloc_pointer)
10855     {
10856         return FALSE;
10857     }
10858
10859     return ((size_t)(alloc_limit - alloc_pointer) >= (size + Align(min_obj_size, align_const)));
10860 }
10861
10862 // Grow by committing more pages
10863 BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address)
10864 {
10865     assert (high_address <= heap_segment_reserved (seg));
10866
10867     //return 0 if we are at the end of the segment.
10868     if (align_on_page (high_address) > heap_segment_reserved (seg))
10869         return FALSE;
10870
10871     if (high_address <= heap_segment_committed (seg))
10872         return TRUE;
10873
10874     size_t c_size = align_on_page ((size_t)(high_address - heap_segment_committed (seg)));
10875     c_size = max (c_size, 16*OS_PAGE_SIZE);
10876     c_size = min (c_size, (size_t)(heap_segment_reserved (seg) - heap_segment_committed (seg)));
10877
10878     if (c_size == 0)
10879         return FALSE;
10880
10881     STRESS_LOG2(LF_GC, LL_INFO10000,
10882                 "Growing heap_segment: %Ix high address: %Ix\n",
10883                 (size_t)seg, (size_t)high_address);
10884
10885     dprintf(3, ("Growing segment allocation %Ix %Ix", (size_t)heap_segment_committed(seg),c_size));
10886     
10887     if (!virtual_alloc_commit_for_heap(heap_segment_committed (seg), c_size, heap_number))
10888     {
10889         dprintf(3, ("Cannot grow heap segment"));
10890         return FALSE;
10891     }
10892 #ifdef MARK_ARRAY
10893 #ifndef BACKGROUND_GC
10894     clear_mark_array (heap_segment_committed (seg),
10895                       heap_segment_committed (seg)+c_size, TRUE);
10896 #endif //BACKGROUND_GC
10897 #endif //MARK_ARRAY
10898     heap_segment_committed (seg) += c_size;
10899     STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix",
10900                 (size_t)heap_segment_committed (seg));
10901
10902     assert (heap_segment_committed (seg) <= heap_segment_reserved (seg));
10903
10904     assert (high_address <= heap_segment_committed (seg));
10905
10906     return TRUE;
10907 }
10908
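// Overload used during allocation/planning: commits enough of the segment to cover the
// requested allocation, accounting for front padding and large-alignment switches.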
10909 inline
10910 int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t* old_loc, size_t size, BOOL pad_front_p  REQD_ALIGN_AND_OFFSET_DCL)
10911 {
10912 #ifdef SHORT_PLUGS
10913     if ((old_loc != 0) && pad_front_p)
10914     {
10915         allocated = allocated + Align (min_obj_size);
10916     }
10917 #endif //SHORT_PLUGS
10918
10919     if (!((old_loc == 0) || same_large_alignment_p (old_loc, allocated)))
10920         size = size + switch_alignment_size (FALSE);
10921 #ifdef FEATURE_STRUCTALIGN
10922     size_t pad = ComputeStructAlignPad(allocated, requiredAlignment, alignmentOffset);
10923     return grow_heap_segment (seg, allocated + pad + size);
10924 #else // FEATURE_STRUCTALIGN
10925     return grow_heap_segment (seg, allocated + size);
10926 #endif // FEATURE_STRUCTALIGN
10927 }
10928
10929 //used only in older generation allocation (i.e during gc).
10930 void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
10931                             int gennum)
10932 {
10933     UNREFERENCED_PARAMETER(gennum);
10934     dprintf (3, ("gc Expanding segment allocation"));
10935     heap_segment* seg = generation_allocation_segment (gen);
10936     if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
10937     {
10938         if (generation_allocation_limit (gen) == heap_segment_plan_allocated (seg))
10939         {
10940             assert (generation_allocation_pointer (gen) >= heap_segment_mem (seg));
10941             assert (generation_allocation_pointer (gen) <= heap_segment_committed (seg));
10942             heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
10943         }
10944         else
10945         {
10946             uint8_t*  hole = generation_allocation_pointer (gen);
10947             size_t  size = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
10948
10949             if (size != 0)
10950             {
10951                 dprintf (3, ("filling up hole: %Ix, size %Ix", hole, size));
10952                 size_t allocated_size = generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen);
10953                 if (size >= Align (min_free_list))
10954                 {
10955                     if (allocated_size < min_free_list)
10956                     {
10957                         if (size >= (Align (min_free_list) + Align (min_obj_size)))
10958                         {
10959                             //split hole into min obj + threadable free item
10960                             make_unused_array (hole, min_obj_size);
10961                             generation_free_obj_space (gen) += Align (min_obj_size);
10962                             make_unused_array (hole + Align (min_obj_size), size - Align (min_obj_size));
10963                             generation_free_list_space (gen) += size - Align (min_obj_size);
10964                             generation_allocator(gen)->thread_item_front (hole + Align (min_obj_size), 
10965                                                                           size - Align (min_obj_size));
10966                             add_gen_free (gen->gen_num, (size - Align (min_obj_size)));
10967                         }
10968                         else
10969                         {
10970                             dprintf (3, ("allocated size too small, can't put back rest on free list %Ix", allocated_size));
10971                             make_unused_array (hole, size);
10972                             generation_free_obj_space (gen) += size;
10973                         }
10974                     }
10975                     else 
10976                     {
10977                         dprintf (3, ("threading hole in front of free list"));
10978                         make_unused_array (hole, size);
10979                         generation_free_list_space (gen) += size;
10980                         generation_allocator(gen)->thread_item_front (hole, size);
10981                         add_gen_free (gen->gen_num, size);
10982                     }
10983                 }
10984                 else
10985                 {
10986                     make_unused_array (hole, size);
10987                     generation_free_obj_space (gen) += size;
10988                 }
10989             }
10990         }
10991         generation_allocation_pointer (gen) = start;
10992         generation_allocation_context_start_region (gen) = start;
10993     }
10994     generation_allocation_limit (gen) = (start + limit_size);
10995 }
10996
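// Fails fast if the pointer-aligned range [start, start + size) contains any non-zero words.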
10997 void verify_mem_cleared (uint8_t* start, size_t size)
10998 {
10999     if (!Aligned (size))
11000     {
11001         FATAL_GC_ERROR();
11002     }
11003
11004     PTR_PTR curr_ptr = (PTR_PTR) start;
11005     for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
11006     {
11007         if (*(curr_ptr++) != 0)
11008         {
11009             FATAL_GC_ERROR();
11010         }
11011     }
11012 }
11013
11014 #if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
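// Sets all mark array bits covering the address range [start, end).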
11015 void gc_heap::set_batch_mark_array_bits (uint8_t* start, uint8_t* end)
11016 {
11017     size_t start_mark_bit = mark_bit_of (start);
11018     size_t end_mark_bit = mark_bit_of (end);
11019     unsigned int startbit = mark_bit_bit (start_mark_bit);
11020     unsigned int endbit = mark_bit_bit (end_mark_bit);
11021     size_t startwrd = mark_bit_word (start_mark_bit);
11022     size_t endwrd = mark_bit_word (end_mark_bit);
11023
11024     dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", 
11025         (size_t)start, (size_t)start_mark_bit, 
11026         (size_t)end, (size_t)end_mark_bit));
11027
11028     unsigned int firstwrd = ~(lowbits (~0, startbit));
11029     unsigned int lastwrd = ~(highbits (~0, endbit));
11030
11031     if (startwrd == endwrd)
11032     {
11033         unsigned int wrd = firstwrd & lastwrd;
11034         mark_array[startwrd] |= wrd;
11035         return;
11036     }
11037
11038     // set the first mark word.
11039     if (startbit)
11040     {
11041         mark_array[startwrd] |= firstwrd;
11042         startwrd++;
11043     }
11044
11045     for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
11046     {
11047         mark_array[wrdtmp] = ~(unsigned int)0;
11048     }
11049
11050     // set the last mark word.
11051     if (endbit)
11052     {
11053         mark_array[endwrd] |= lastwrd;
11054     }
11055 }
11056
11057 // makes sure that the mark array bits between start and end are 0.
11058 void gc_heap::check_batch_mark_array_bits (uint8_t* start, uint8_t* end)
11059 {
11060     size_t start_mark_bit = mark_bit_of (start);
11061     size_t end_mark_bit = mark_bit_of (end);
11062     unsigned int startbit = mark_bit_bit (start_mark_bit);
11063     unsigned int endbit = mark_bit_bit (end_mark_bit);
11064     size_t startwrd = mark_bit_word (start_mark_bit);
11065     size_t endwrd = mark_bit_word (end_mark_bit);
11066
11067     //dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix", 
11068     //    (size_t)start, (size_t)start_mark_bit, 
11069     //    (size_t)end, (size_t)end_mark_bit));
11070
11071     unsigned int firstwrd = ~(lowbits (~0, startbit));
11072     unsigned int lastwrd = ~(highbits (~0, endbit));
11073
11074     if (startwrd == endwrd)
11075     {
11076         unsigned int wrd = firstwrd & lastwrd;
11077         if (mark_array[startwrd] & wrd)
11078         {
11079             dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11080                             wrd, startwrd, 
11081                             mark_array [startwrd], mark_word_address (startwrd)));
11082             FATAL_GC_ERROR();
11083         }
11084         return;
11085     }
11086
11087     // check the first mark word.
11088     if (startbit)
11089     {
11090         if (mark_array[startwrd] & firstwrd)
11091         {
11092             dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11093                             firstwrd, startwrd, 
11094                             mark_array [startwrd], mark_word_address (startwrd)));
11095             FATAL_GC_ERROR();
11096         }
11097
11098         startwrd++;
11099     }
11100
11101     for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
11102     {
11103         if (mark_array[wrdtmp])
11104         {
11105             dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11106                             wrdtmp, 
11107                             mark_array [wrdtmp], mark_word_address (wrdtmp)));
11108             FATAL_GC_ERROR();
11109         }
11110     }
11111
11112     // check the last mark word.
11113     if (endbit)
11114     {
11115         if (mark_array[endwrd] & lastwrd)
11116         {
11117             dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
11118                             lastwrd, endwrd, 
11119                             mark_array [endwrd], mark_word_address (endwrd)));
11120             FATAL_GC_ERROR();
11121         }
11122     }
11123 }
11124 #endif //VERIFY_HEAP && BACKGROUND_GC
11125
11126 allocator::allocator (unsigned int num_b, size_t fbs, alloc_list* b)
11127 {
11128     assert (num_b < MAX_BUCKET_COUNT);
11129     num_buckets = num_b;
11130     frst_bucket_size = fbs;
11131     buckets = b;
11132 }
11133
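// Bucket 0 lives inline in first_bucket; the remaining (num_buckets - 1) lists
// live in the external 'buckets' array, hence the bn-1 indexing below.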
11134 alloc_list& allocator::alloc_list_of (unsigned int bn)
11135 {
11136     assert (bn < num_buckets);
11137     if (bn == 0)
11138         return first_bucket;
11139     else
11140         return buckets [bn-1];
11141 }
11142
11143 size_t& allocator::alloc_list_damage_count_of (unsigned int bn)
11144 {
11145     assert (bn < num_buckets);
11146     if (bn == 0)
11147         return first_bucket.alloc_list_damage_count();
11148     else
11149         return buckets [bn-1].alloc_list_damage_count();
11150 }
11151
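// Removes 'item' from bucket 'bn'. When use_undo_p is set, the previous item's
// undo slot remembers the removed item and the bucket's damage count is
// incremented so that copy_from_alloc_list / commit_alloc_list_changes can
// later repair or finalize the list.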
11152 void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item, BOOL use_undo_p)
11153 {
11154     //unlink the free_item
11155     alloc_list* al = &alloc_list_of (bn);
11156     if (prev_item)
11157     {
11158         if (use_undo_p && (free_list_undo (prev_item) == UNDO_EMPTY))
11159         {
11160             assert (item == free_list_slot (prev_item));
11161             free_list_undo (prev_item) = item;
11162             alloc_list_damage_count_of (bn)++;
11163         }
11164         free_list_slot (prev_item) = free_list_slot(item);
11165     }
11166     else
11167     {
11168         al->alloc_list_head() = (uint8_t*)free_list_slot(item);
11169     }
11170     if (al->alloc_list_tail() == item)
11171     {
11172         al->alloc_list_tail() = prev_item;
11173     }
11174 }
11175
11176 void allocator::clear()
11177 {
11178     for (unsigned int i = 0; i < num_buckets; i++)
11179     {
11180         alloc_list_head_of (i) = 0;
11181         alloc_list_tail_of (i) = 0;
11182     }
11183 }
11184
11185 //always thread to the end.
11186 void allocator::thread_free_item (uint8_t* item, uint8_t*& head, uint8_t*& tail)
11187 {
11188     free_list_slot (item) = 0;
11189     free_list_undo (item) = UNDO_EMPTY;
11190     assert (item != head);
11191
11192     if (head == 0)
11193     {
11194        head = item;
11195     }
11196     //TODO: This shouldn't happen anymore - verify that's the case.
11197     //the following is necessary because the last free element
11198     //may have been truncated, and tail isn't updated.
11199     else if (free_list_slot (head) == 0)
11200     {
11201         free_list_slot (head) = item;
11202     }
11203     else
11204     {
11205         assert (item != tail);
11206         assert (free_list_slot(tail) == 0);
11207         free_list_slot (tail) = item;
11208     }
11209     tail = item;
11210 }
11211
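// Threads a free item onto the end of the bucket whose size class covers
// 'size': an item lands in the first bucket i with size < (frst_bucket_size << i),
// or in the last bucket if no smaller one fits. Illustrative example (values
// are hypothetical, not taken from this file): with frst_bucket_size == 256
// and 4 buckets, a 300 byte item lands in bucket 1 and a 10000 byte item in
// bucket 3.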
11212 void allocator::thread_item (uint8_t* item, size_t size)
11213 {
11214     size_t sz = frst_bucket_size;
11215     unsigned int a_l_number = 0; 
11216
11217     for (; a_l_number < (num_buckets-1); a_l_number++)
11218     {
11219         if (size < sz)
11220         {
11221             break;
11222         }
11223         sz = sz * 2;
11224     }
11225     alloc_list* al = &alloc_list_of (a_l_number);
11226     thread_free_item (item, 
11227                       al->alloc_list_head(),
11228                       al->alloc_list_tail());
11229 }
11230
11231 void allocator::thread_item_front (uint8_t* item, size_t size)
11232 {
11233     //find the right free list
11234     size_t sz = frst_bucket_size;
11235     unsigned int a_l_number = 0; 
11236     for (; a_l_number < (num_buckets-1); a_l_number++)
11237     {
11238         if (size < sz)
11239         {
11240             break;
11241         }
11242         sz = sz * 2;
11243     }
11244     alloc_list* al = &alloc_list_of (a_l_number);
11245     free_list_slot (item) = al->alloc_list_head();
11246     free_list_undo (item) = UNDO_EMPTY;
11247
11248     if (al->alloc_list_tail() == 0)
11249     {
11250         al->alloc_list_tail() = al->alloc_list_head();
11251     }
11252     al->alloc_list_head() = item;
11253     if (al->alloc_list_tail() == 0)
11254     {
11255         al->alloc_list_tail() = item;
11256     }
11257 }
11258
11259 void allocator::copy_to_alloc_list (alloc_list* toalist)
11260 {
11261     for (unsigned int i = 0; i < num_buckets; i++)
11262     {
11263         toalist [i] = alloc_list_of (i);
11264 #ifdef FL_VERIFICATION
11265         uint8_t* free_item = alloc_list_head_of (i);
11266         size_t count = 0;
11267         while (free_item)
11268         {
11269             count++;
11270             free_item = free_list_slot (free_item);
11271         }
11272
11273         toalist[i].item_count = count;
11274 #endif //FL_VERIFICATION
11275     }
11276 }
11277
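// Restores the free lists saved by copy_to_alloc_list. Unless the allocator
// discards items that don't fit, the lists are also repaired: for as many
// items as the recorded damage count, any free_list_undo value written by
// unlink_item is copied back into the item's next slot so the pre-plan list
// shape is recovered.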
11278 void allocator::copy_from_alloc_list (alloc_list* fromalist)
11279 {
11280     BOOL repair_list = !discard_if_no_fit_p ();
11281     for (unsigned int i = 0; i < num_buckets; i++)
11282     {
11283         size_t count = alloc_list_damage_count_of (i);
11284         alloc_list_of (i) = fromalist [i];
11285         assert (alloc_list_damage_count_of (i) == 0);
11286
11287         if (repair_list)
11288         {
11289             //repair the list
11290             //new items may have been added during the plan phase 
11291             //items may have been unlinked. 
11292             uint8_t* free_item = alloc_list_head_of (i);
11293             while (free_item && count)
11294             {
11295                 assert (((CObjectHeader*)free_item)->IsFree());
11296                 if ((free_list_undo (free_item) != UNDO_EMPTY))
11297                 {
11298                     count--;
11299                     free_list_slot (free_item) = free_list_undo (free_item);
11300                     free_list_undo (free_item) = UNDO_EMPTY;
11301                 }
11302
11303                 free_item = free_list_slot (free_item);
11304             }
11305
11306 #ifdef FL_VERIFICATION
11307             free_item = alloc_list_head_of (i);
11308             size_t item_count = 0;
11309             while (free_item)
11310             {
11311                 item_count++;
11312                 free_item = free_list_slot (free_item);
11313             }
11314
11315             assert (item_count == alloc_list_of (i).item_count);
11316 #endif //FL_VERIFICATION
11317         }
11318 #ifdef DEBUG
11319         uint8_t* tail_item = alloc_list_tail_of (i);
11320         assert ((tail_item == 0) || (free_list_slot (tail_item) == 0));
11321 #endif
11322     }
11323 }
11324
11325 void allocator::commit_alloc_list_changes()
11326 {
11327     BOOL repair_list = !discard_if_no_fit_p ();
11328     if (repair_list)
11329     {
11330         for (unsigned int i = 0; i < num_buckets; i++)
11331         {
11332             //remove the undo info from list. 
11333             uint8_t* free_item = alloc_list_head_of (i);
11334             size_t count = alloc_list_damage_count_of (i);
11335             while (free_item && count)
11336             {
11337                 assert (((CObjectHeader*)free_item)->IsFree());
11338
11339                 if (free_list_undo (free_item) != UNDO_EMPTY)
11340                 {
11341                     free_list_undo (free_item) = UNDO_EMPTY;
11342                     count--;
11343                 }
11344
11345                 free_item = free_list_slot (free_item);
11346             }
11347
11348             alloc_list_damage_count_of (i) = 0; 
11349         }
11350     }
11351 }
11352
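// Hands the range [start, start + limit_size) to the allocation context: any
// leftover space in the old context is turned into a free object, the
// segment's 'used' water mark is updated, and the newly exposed memory is
// cleared after more_space_lock has been released.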
11353 void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
11354                                 alloc_context* acontext, heap_segment* seg,
11355                                 int align_const, int gen_number)
11356 {
11357     size_t aligned_min_obj_size = Align(min_obj_size, align_const);
11358
11359     //probably should pass seg==0 for free lists.
11360     if (seg)
11361     {
11362         assert (heap_segment_used (seg) <= heap_segment_committed (seg));
11363     }
11364
11365     dprintf (3, ("Expanding segment allocation [%Ix, %Ix[", (size_t)start,
11366                (size_t)start + limit_size - aligned_min_obj_size));
11367
11368     if ((acontext->alloc_limit != start) &&
11369         (acontext->alloc_limit + aligned_min_obj_size)!= start)
11370     {
11371         uint8_t*  hole = acontext->alloc_ptr;
11372         if (hole != 0)
11373         {
11374             size_t  size = (acontext->alloc_limit - acontext->alloc_ptr);
11375             dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + size + Align (min_obj_size, align_const)));
11376             // when we are finishing an allocation from a free list
11377             // we know that the free area was Align(min_obj_size) larger
11378             acontext->alloc_bytes -= size;
11379             size_t free_obj_size = size + aligned_min_obj_size;
11380             make_unused_array (hole, free_obj_size);
11381             generation_free_obj_space (generation_of (gen_number)) += free_obj_size;
11382         }
11383         acontext->alloc_ptr = start;
11384     }
11385     else
11386     {
11387         // If the next alloc context is right up against the current one it means we are absorbing the min
11388         // object, so we need to account for that.
11389         acontext->alloc_bytes += (start - acontext->alloc_limit);
11390     }
11391
11392     acontext->alloc_limit = (start + limit_size - aligned_min_obj_size);
11393     acontext->alloc_bytes += limit_size - ((gen_number < max_generation + 1) ? aligned_min_obj_size : 0);
11394
11395 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
11396     if (g_fEnableARM)
11397     {
11398         AppDomain* alloc_appdomain = GetAppDomain();
11399         alloc_appdomain->RecordAllocBytes (limit_size, heap_number);
11400     }
11401 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
11402
11403     uint8_t* saved_used = 0;
11404
11405     if (seg)
11406     {
11407         saved_used = heap_segment_used (seg);
11408     }
11409
11410     if (seg == ephemeral_heap_segment)
11411     {
11412         //Sometimes the allocated size is advanced without clearing the
11413         //memory. Let's catch up here
11414         if (heap_segment_used (seg) < (alloc_allocated - plug_skew))
11415         {
11416 #ifdef MARK_ARRAY
11417 #ifndef BACKGROUND_GC
11418             clear_mark_array (heap_segment_used (seg) + plug_skew, alloc_allocated);
11419 #endif //BACKGROUND_GC
11420 #endif //MARK_ARRAY
11421             heap_segment_used (seg) = alloc_allocated - plug_skew;
11422         }
11423     }
11424 #ifdef BACKGROUND_GC
11425     else if (seg)
11426     {
11427         uint8_t* old_allocated = heap_segment_allocated (seg) - plug_skew - limit_size;
11428 #ifdef FEATURE_LOH_COMPACTION
11429         old_allocated -= Align (loh_padding_obj_size, align_const);
11430 #endif //FEATURE_LOH_COMPACTION
11431
11432         assert (heap_segment_used (seg) >= old_allocated);
11433     }
11434 #endif //BACKGROUND_GC
11435     if ((seg == 0) ||
11436         (start - plug_skew + limit_size) <= heap_segment_used (seg))
11437     {
11438         dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear memory(1)", heap_number));
11439         add_saved_spinlock_info (me_release, mt_clr_mem);
11440         leave_spin_lock (&more_space_lock);
11441         dprintf (3, ("clearing memory at %Ix for %d bytes", (start - plug_skew), limit_size));
11442         memclr (start - plug_skew, limit_size);
11443     }
11444     else
11445     {
11446         uint8_t* used = heap_segment_used (seg);
11447         heap_segment_used (seg) = start + limit_size - plug_skew;
11448
11449         dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear memory", heap_number));
11450         add_saved_spinlock_info (me_release, mt_clr_mem);
11451         leave_spin_lock (&more_space_lock);
11452         if ((start - plug_skew) < used)
11453         {
11454             if (used != saved_used)
11455             {
11456                 FATAL_GC_ERROR ();
11457             }
11458
11459             dprintf (2, ("clearing memory before used at %Ix for %Id bytes", 
11460                 (start - plug_skew), (plug_skew + used - start)));
11461             memclr (start - plug_skew, used - (start - plug_skew));
11462         }
11463     }
11464
11465     //this portion can be done after we release the lock
11466     if (seg == ephemeral_heap_segment)
11467     {
11468 #ifdef FFIND_OBJECT
11469         if (gen0_must_clear_bricks > 0)
11470         {
11471             //set the brick table to speed up find_object
11472             size_t b = brick_of (acontext->alloc_ptr);
11473             set_brick (b, acontext->alloc_ptr - brick_address (b));
11474             b++;
11475             dprintf (3, ("Allocation Clearing bricks [%Ix, %Ix[",
11476                          b, brick_of (align_on_brick (start + limit_size))));
11477             volatile short* x = &brick_table [b];
11478             short* end_x = &brick_table [brick_of (align_on_brick (start + limit_size))];
11479
11480             for (;x < end_x;x++)
11481                 *x = -1;
11482         }
11483         else
11484 #endif //FFIND_OBJECT
11485         {
11486             gen0_bricks_cleared = FALSE;
11487         }
11488     }
11489
11490     // verifying the memory is completely cleared.
11491     //verify_mem_cleared (start - plug_skew, limit_size);
11492 }
11493
11494 /* In order to make the allocator faster, allocate returns a
11495  * zero-filled object. Care must be taken to set the allocation limit to the
11496  * allocation pointer after a GC.
11497  */
11498
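// Computes the allocation limit for a request of 'size' bytes: the request is
// padded by Align (min_obj_size) so a free object can always be planted at the
// end, rounded up to the allocation quantum for SOH requests, and capped at
// 'room' before being passed to new_allocation_limit.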
11499 size_t gc_heap::limit_from_size (size_t size, size_t room, int gen_number,
11500                                  int align_const)
11501 {
11502     size_t new_limit = new_allocation_limit ((size + Align (min_obj_size, align_const)),
11503                                              min (room,max (size + Align (min_obj_size, align_const),
11504                                                             ((gen_number < max_generation+1) ?
11505                                                              allocation_quantum :
11506                                                              0))),
11507                                              gen_number);
11508     assert (new_limit >= (size + Align (min_obj_size, align_const)));
11509     dprintf (100, ("requested to allocate %Id bytes, actual size is %Id", size, new_limit));
11510     return new_limit;
11511 }
11512
11513 void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size, 
11514                           uint8_t* allocated, uint8_t* reserved)
11515 {
11516     dprintf (1, ("total committed on the heap is %Id", get_total_committed_size()));
11517
11518     UNREFERENCED_PARAMETER(heap_num);
11519
11520     if (reason == oom_budget)
11521     {
11522         alloc_size = dd_min_size (dynamic_data_of (0)) / 2;
11523     }
11524
11525     if ((reason == oom_budget) && ((!fgm_result.loh_p) && (fgm_result.fgm != fgm_no_failure)))
11526     {
11527         // This means during the last GC we needed to reserve and/or commit more memory
11528         // but we couldn't. We proceeded with the GC and ended up not having enough
11529         // memory at the end. This is a legitimate OOM situation. Otherwise we
11530         // probably made a mistake and didn't expand the heap when we should have.
11531         reason = oom_low_mem;
11532     }
11533
11534     oom_info.reason = reason;
11535     oom_info.allocated = allocated;
11536     oom_info.reserved = reserved;
11537     oom_info.alloc_size = alloc_size;
11538     oom_info.gc_index = settings.gc_index;
11539     oom_info.fgm = fgm_result.fgm;
11540     oom_info.size = fgm_result.size;
11541     oom_info.available_pagefile_mb = fgm_result.available_pagefile_mb;
11542     oom_info.loh_p = fgm_result.loh_p;
11543
11544     fgm_result.fgm = fgm_no_failure;
11545
11546     // Break early, before the more_space_lock is released, so that no other
11547     // thread could have allocated on the same heap when the OOM happened.
11548     if (GCConfig::GetBreakOnOOM())
11549     {
11550         GCToOSInterface::DebugBreak();
11551     }
11552 }
11553
11554 #ifdef BACKGROUND_GC
11555 BOOL gc_heap::background_allowed_p()
11556 {
11557     return ( gc_can_use_concurrent && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)) );
11558 }
11559 #endif //BACKGROUND_GC
11560
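// Decides whether to raise the "full GC approaching" notification: either the
// remaining gen2 (or LOH) budget has dropped below the percentage registered
// by the user, or generation_to_condemn predicts a blocking max_generation
// collection for other reasons.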
11561 void gc_heap::check_for_full_gc (int gen_num, size_t size)
11562 {
11563     BOOL should_notify = FALSE;
11564     // If we detect a full GC because of the allocation budget specified, this is TRUE;
11565     // it's FALSE if it's due to other factors.
11566     BOOL alloc_factor = TRUE; 
11567     int i = 0;
11568     int n = 0;
11569     int n_initial = gen_num;
11570     BOOL local_blocking_collection = FALSE;
11571     BOOL local_elevation_requested = FALSE;
11572     int new_alloc_remain_percent = 0;
11573
11574     if (full_gc_approach_event_set)
11575     {
11576         return;
11577     }
11578     
11579     if (gen_num != (max_generation + 1))
11580     {
11581         gen_num = max_generation;
11582     }
11583
11584     dynamic_data* dd_full = dynamic_data_of (gen_num);
11585     ptrdiff_t new_alloc_remain = 0;
11586     uint32_t pct = ((gen_num == (max_generation + 1)) ? fgn_loh_percent : fgn_maxgen_percent);
11587
11588     for (int gen_index = 0; gen_index <= (max_generation + 1); gen_index++)
11589     {
11590         dprintf (2, ("FGN: h#%d: gen%d: %Id(%Id)", 
11591                      heap_number, gen_index,
11592                      dd_new_allocation (dynamic_data_of (gen_index)),
11593                      dd_desired_allocation (dynamic_data_of (gen_index))));
11594     }
11595
11596     // For small object allocations we only check every fgn_check_quantum bytes.
11597     if (n_initial == 0)
11598     {
11599         dprintf (2, ("FGN: gen0 last recorded alloc: %Id", fgn_last_alloc));
11600         dynamic_data* dd_0 = dynamic_data_of (n_initial);
11601         if (((fgn_last_alloc - dd_new_allocation (dd_0)) < fgn_check_quantum) &&
11602             (dd_new_allocation (dd_0) >= 0))
11603         {
11604             return;
11605         }
11606         else
11607         {
11608             fgn_last_alloc = dd_new_allocation (dd_0);
11609             dprintf (2, ("FGN: gen0 last recorded alloc is now: %Id", fgn_last_alloc));
11610         }
11611
11612         // We don't consider the size that came from soh because it doesn't contribute to the
11613         // gen2 budget.
11614         size = 0;
11615     }
11616
11617     for (i = n+1; i <= max_generation; i++)
11618     {
11619         if (get_new_allocation (i) <= 0)
11620         {
11621             n = min (i, max_generation);
11622         }
11623         else
11624             break;
11625     }
11626
11627     dprintf (2, ("FGN: h#%d: gen%d budget exceeded", heap_number, n));
11628     if (gen_num == max_generation)
11629     {
11630         // If it's the small object heap we should first see whether we will even be looking
11631         // at the gen2 budget in the next GC. If not we should go directly to checking other factors.
11632         if (n < (max_generation - 1))
11633         {
11634             goto check_other_factors;
11635         }
11636     }
11637
11638     new_alloc_remain = dd_new_allocation (dd_full) - size;
11639
11640     new_alloc_remain_percent = (int)(((float)(new_alloc_remain) / (float)dd_desired_allocation (dd_full)) * 100);
11641
11642     dprintf (2, ("FGN: alloc threshold for gen%d is %d%%, current threshold is %d%%", 
11643                  gen_num, pct, new_alloc_remain_percent));
11644
11645     if (new_alloc_remain_percent <= (int)pct)
11646     {
11647 #ifdef BACKGROUND_GC
11648         // If background GC is enabled, we still want to check whether this will
11649         // be a blocking GC or not because we only want to notify when it's a 
11650         // blocking full GC.
11651         if (background_allowed_p())
11652         {
11653             goto check_other_factors;
11654         }
11655 #endif //BACKGROUND_GC
11656
11657         should_notify = TRUE;
11658         goto done;
11659     }
11660
11661 check_other_factors:
11662
11663     dprintf (2, ("FGC: checking other factors"));
11664     n = generation_to_condemn (n, 
11665                                &local_blocking_collection, 
11666                                &local_elevation_requested, 
11667                                TRUE);
11668
11669     if (local_elevation_requested && (n == max_generation))
11670     {
11671         if (settings.should_lock_elevation)
11672         {
11673             int local_elevation_locked_count = settings.elevation_locked_count + 1;
11674             if (local_elevation_locked_count != 6)
11675             {
11676                 dprintf (2, ("FGN: lock count is %d - Condemning max_generation-1", 
11677                     local_elevation_locked_count));
11678                 n = max_generation - 1;
11679             }
11680         }
11681     }
11682
11683     dprintf (2, ("FGN: we estimate gen%d will be collected", n));
11684
11685 #ifdef BACKGROUND_GC
11686     // When background GC is enabled it decreases the accuracy of our prediction:
11687     // by the time the GC happens, we may not be under BGC anymore. If we
11688     // predict often enough it should be ok.
11689     if ((n == max_generation) &&
11690         (recursive_gc_sync::background_running_p()))
11691     {
11692         n = max_generation - 1;
11693         dprintf (2, ("FGN: bgc - 1 instead of 2"));
11694     }
11695
11696     if ((n == max_generation) && !local_blocking_collection)
11697     {
11698         if (!background_allowed_p())
11699         {
11700             local_blocking_collection = TRUE;
11701         }
11702     }
11703 #endif //BACKGROUND_GC
11704
11705     dprintf (2, ("FGN: we estimate gen%d will be collected: %s", 
11706                        n, 
11707                        (local_blocking_collection ? "blocking" : "background")));
11708
11709     if ((n == max_generation) && local_blocking_collection)
11710     {
11711         alloc_factor = FALSE;
11712         should_notify = TRUE;
11713         goto done;
11714     }
11715
11716 done:
11717
11718     if (should_notify)
11719     {
11720         dprintf (2, ("FGN: gen%d detecting full GC approaching(%s) (GC#%d) (%Id%% left in gen%d)", 
11721                      n_initial,
11722                      (alloc_factor ? "alloc" : "other"),
11723                      dd_collection_count (dynamic_data_of (0)),
11724                      new_alloc_remain_percent, 
11725                      gen_num));
11726
11727         send_full_gc_notification (n_initial, alloc_factor);
11728     }
11729 }
11730
11731 void gc_heap::send_full_gc_notification (int gen_num, BOOL due_to_alloc_p)
11732 {
11733     if (!full_gc_approach_event_set)
11734     {
11735         assert (full_gc_approach_event.IsValid());
11736         FIRE_EVENT(GCFullNotify_V1, gen_num, due_to_alloc_p);
11737
11738         full_gc_end_event.Reset();
11739         full_gc_approach_event.Set();
11740         full_gc_approach_event_set = true;
11741     }
11742 }
11743
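// Waits on the supplied full GC notification event. Returns wait_full_gc_na
// when no notification is registered (or the last GC was concurrent),
// wait_full_gc_cancelled when the registration was removed while waiting,
// wait_full_gc_success / wait_full_gc_timeout for the corresponding wait
// results, and wait_full_gc_failed otherwise.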
11744 wait_full_gc_status gc_heap::full_gc_wait (GCEvent *event, int time_out_ms)
11745 {
11746     if (fgn_maxgen_percent == 0)
11747     {
11748         return wait_full_gc_na;
11749     }
11750
11751     uint32_t wait_result = user_thread_wait(event, FALSE, time_out_ms);
11752
11753     if ((wait_result == WAIT_OBJECT_0) || (wait_result == WAIT_TIMEOUT))
11754     {
11755         if (fgn_maxgen_percent == 0)
11756         {
11757             return wait_full_gc_cancelled;
11758         }
11759         
11760         if (wait_result == WAIT_OBJECT_0)
11761         {
11762 #ifdef BACKGROUND_GC
11763             if (fgn_last_gc_was_concurrent)
11764             {
11765                 fgn_last_gc_was_concurrent = FALSE;
11766                 return wait_full_gc_na;
11767             }
11768             else
11769 #endif //BACKGROUND_GC
11770             {
11771                 return wait_full_gc_success;
11772             }
11773         }
11774         else
11775         {
11776             return wait_full_gc_timeout;
11777         }
11778     }
11779     else
11780     {
11781         return wait_full_gc_failed;
11782     }
11783 }
11784
11785 size_t gc_heap::get_full_compact_gc_count()
11786 {
11787     return full_gc_counts[gc_type_compacting];
11788 }
11789
11790 // DTREVIEW - we should check this in dt_low_ephemeral_space_p
11791 // as well.
11792 inline
11793 BOOL gc_heap::short_on_end_of_seg (int gen_number,
11794                                    heap_segment* seg,
11795                                    int align_const)
11796 {
11797     UNREFERENCED_PARAMETER(gen_number);
11798     uint8_t* allocated = heap_segment_allocated(seg);
11799
11800     return (!a_size_fit_p (end_space_after_gc(),
11801                           allocated,
11802                           heap_segment_reserved (seg), 
11803                           align_const));
11804 }
11805
11806 #ifdef _MSC_VER
11807 #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
11808 #endif // _MSC_VER
11809
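// Tries to satisfy a SOH allocation from the generation's free lists. A chunk
// must provide at least size + Align (min_obj_size); a remainder of at least
// min_free_list is threaded back onto the front of its bucket, otherwise the
// whole chunk is absorbed into the allocation.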
11810 inline
11811 BOOL gc_heap::a_fit_free_list_p (int gen_number, 
11812                                  size_t size, 
11813                                  alloc_context* acontext,
11814                                  int align_const)
11815 {
11816     BOOL can_fit = FALSE;
11817     generation* gen = generation_of (gen_number);
11818     allocator* gen_allocator = generation_allocator (gen);
11819     size_t sz_list = gen_allocator->first_bucket_size();
11820     for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
11821     {
11822         if ((size < sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
11823         {
11824             uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
11825             uint8_t* prev_free_item = 0;
11826
11827             while (free_list != 0)
11828             {
11829                 dprintf (3, ("considering free list %Ix", (size_t)free_list));
11830                 size_t free_list_size = unused_array_size (free_list);
11831                 if ((size + Align (min_obj_size, align_const)) <= free_list_size)
11832                 {
11833                     dprintf (3, ("Found adequate unused area: [%Ix, size: %Id",
11834                                  (size_t)free_list, free_list_size));
11835
11836                     gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
11837                     // We ask for an extra Align (min_obj_size)
11838                     // to make sure that we can insert a free object;
11839                     // adjust_limit will set the limit lower
11840                     size_t limit = limit_from_size (size, free_list_size, gen_number, align_const);
11841
11842                     uint8_t*  remain = (free_list + limit);
11843                     size_t remain_size = (free_list_size - limit);
11844                     if (remain_size >= Align(min_free_list, align_const))
11845                     {
11846                         make_unused_array (remain, remain_size);
11847                         gen_allocator->thread_item_front (remain, remain_size);
11848                         assert (remain_size >= Align (min_obj_size, align_const));
11849                     }
11850                     else
11851                     {
11852                         //absorb the entire free list
11853                         limit += remain_size;
11854                     }
11855                     generation_free_list_space (gen) -= limit;
11856
11857                     adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
11858
11859                     can_fit = TRUE;
11860                     goto end;
11861                 }
11862                 else if (gen_allocator->discard_if_no_fit_p())
11863                 {
11864                     assert (prev_free_item == 0);
11865                     dprintf (3, ("couldn't use this free area, discarding"));
11866                     generation_free_obj_space (gen) += free_list_size;
11867
11868                     gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
11869                     generation_free_list_space (gen) -= free_list_size;
11870                 }
11871                 else
11872                 {
11873                     prev_free_item = free_list;
11874                 }
11875                 free_list = free_list_slot (free_list); 
11876             }
11877         }
11878         sz_list = sz_list * 2;
11879     }
11880 end:
11881     return can_fit;
11882 }
11883
11884
11885 #ifdef BACKGROUND_GC
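// LOH allocation clearing used while a background GC is in progress: the new
// space is first published as a free object, the BGC allocation lock slot is
// released, and the object body is cleared outside more_space_lock before the
// allocation context is set up.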
11886 void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
11887                                  size_t size, 
11888                                  alloc_context* acontext,
11889                                  int align_const, 
11890                                  int lock_index,
11891                                  BOOL check_used_p,
11892                                  heap_segment* seg)
11893 {
11894     make_unused_array (alloc_start, size);
11895
11896 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
11897     if (g_fEnableARM)
11898     {
11899         AppDomain* alloc_appdomain = GetAppDomain();
11900         alloc_appdomain->RecordAllocBytes (size, heap_number);
11901     }
11902 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
11903
11904     size_t size_of_array_base = sizeof(ArrayBase);
11905
11906     bgc_alloc_lock->loh_alloc_done_with_index (lock_index);
11907
11908     // clear memory while not holding the lock. 
11909     size_t size_to_skip = size_of_array_base;
11910     size_t size_to_clear = size - size_to_skip - plug_skew;
11911     size_t saved_size_to_clear = size_to_clear;
11912     if (check_used_p)
11913     {
11914         uint8_t* end = alloc_start + size - plug_skew;
11915         uint8_t* used = heap_segment_used (seg);
11916         if (used < end)
11917         {
11918             if ((alloc_start + size_to_skip) < used)
11919             {
11920                 size_to_clear = used - (alloc_start + size_to_skip);
11921             }
11922             else
11923             {
11924                 size_to_clear = 0;
11925             }
11926             dprintf (2, ("bgc loh: setting used to %Ix", end));
11927             heap_segment_used (seg) = end;
11928         }
11929
11930         dprintf (2, ("bgc loh: used: %Ix, alloc: %Ix, end of alloc: %Ix, clear %Id bytes",
11931                      used, alloc_start, end, size_to_clear));
11932     }
11933     else
11934     {
11935         dprintf (2, ("bgc loh: [%Ix-[%Ix(%Id)", alloc_start, alloc_start+size, size));
11936     }
11937
11938 #ifdef VERIFY_HEAP
11939     // since we fill free objects with 0xcc when we verify the heap,
11940     // we need to make sure we clear those bytes.
11941     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
11942     {
11943         if (size_to_clear < saved_size_to_clear)
11944         {
11945             size_to_clear = saved_size_to_clear;
11946         }
11947     }
11948 #endif //VERIFY_HEAP
11949     
11950     dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear large obj", heap_number));
11951     add_saved_spinlock_info (me_release, mt_clr_large_mem);
11952     leave_spin_lock (&more_space_lock);
11953     memclr (alloc_start + size_to_skip, size_to_clear);
11954
11955     bgc_alloc_lock->loh_alloc_set (alloc_start);
11956
11957     acontext->alloc_ptr = alloc_start;
11958     acontext->alloc_limit = (alloc_start + size - Align (min_obj_size, align_const));
11959
11960     // need to clear the rest of the object before we hand it out.
11961     clear_unused_array(alloc_start, size);
11962 }
11963 #endif //BACKGROUND_GC
11964
11965 BOOL gc_heap::a_fit_free_list_large_p (size_t size, 
11966                                        alloc_context* acontext,
11967                                        int align_const)
11968 {
11969 #ifdef BACKGROUND_GC
11970     wait_for_background_planning (awr_loh_alloc_during_plan);
11971 #endif //BACKGROUND_GC
11972
11973     BOOL can_fit = FALSE;
11974     int gen_number = max_generation + 1;
11975     generation* gen = generation_of (gen_number);
11976     allocator* loh_allocator = generation_allocator (gen); 
11977
11978 #ifdef FEATURE_LOH_COMPACTION
11979     size_t loh_pad = Align (loh_padding_obj_size, align_const);
11980 #endif //FEATURE_LOH_COMPACTION
11981
11982 #ifdef BACKGROUND_GC
11983     int cookie = -1;
11984 #endif //BACKGROUND_GC
11985     size_t sz_list = loh_allocator->first_bucket_size();
11986     for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
11987     {
11988         if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
11989         {
11990             uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
11991             uint8_t* prev_free_item = 0;
11992             while (free_list != 0)
11993             {
11994                 dprintf (3, ("considering free list %Ix", (size_t)free_list));
11995
11996                 size_t free_list_size = unused_array_size(free_list);
11997
11998 #ifdef FEATURE_LOH_COMPACTION
11999                 if ((size + loh_pad) <= free_list_size)
12000 #else
12001                 if (((size + Align (min_obj_size, align_const)) <= free_list_size)||
12002                     (size == free_list_size))
12003 #endif //FEATURE_LOH_COMPACTION
12004                 {
12005 #ifdef BACKGROUND_GC
12006                     cookie = bgc_alloc_lock->loh_alloc_set (free_list);
12007 #endif //BACKGROUND_GC
12008
12009                     //unlink the free_item
12010                     loh_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
12011
12012                     // Subtract min obj size because limit_from_size adds it. Not needed for LOH
12013                     size_t limit = limit_from_size (size - Align(min_obj_size, align_const), free_list_size, 
12014                                                     gen_number, align_const);
12015
12016 #ifdef FEATURE_LOH_COMPACTION
12017                     make_unused_array (free_list, loh_pad);
12018                     limit -= loh_pad;
12019                     free_list += loh_pad;
12020                     free_list_size -= loh_pad;
12021 #endif //FEATURE_LOH_COMPACTION
12022
12023                     uint8_t*  remain = (free_list + limit);
12024                     size_t remain_size = (free_list_size - limit);
12025                     if (remain_size != 0)
12026                     {
12027                         assert (remain_size >= Align (min_obj_size, align_const));
12028                         make_unused_array (remain, remain_size);
12029                     }
12030                     if (remain_size >= Align(min_free_list, align_const))
12031                     {
12032                         loh_thread_gap_front (remain, remain_size, gen);
12033                         assert (remain_size >= Align (min_obj_size, align_const));
12034                     }
12035                     else
12036                     {
12037                         generation_free_obj_space (gen) += remain_size;
12038                     }
12039                     generation_free_list_space (gen) -= free_list_size;
12040                     dprintf (3, ("found fit on loh at %Ix", free_list));
12041 #ifdef BACKGROUND_GC
12042                     if (cookie != -1)
12043                     {
12044                         bgc_loh_alloc_clr (free_list, limit, acontext, align_const, cookie, FALSE, 0);
12045                     }
12046                     else
12047 #endif //BACKGROUND_GC
12048                     {
12049                         adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
12050                     }
12051
12052                     //fix the limit to compensate for adjust_limit_clr making it too short 
12053                     acontext->alloc_limit += Align (min_obj_size, align_const);
12054                     can_fit = TRUE;
12055                     goto exit;
12056                 }
12057                 prev_free_item = free_list;
12058                 free_list = free_list_slot (free_list); 
12059             }
12060         }
12061         sz_list = sz_list * 2;
12062     }
12063 exit:
12064     return can_fit;
12065 }
12066
12067 #ifdef _MSC_VER
12068 #pragma warning(default:4706)
12069 #endif // _MSC_VER
12070
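// Tries to carve the allocation out of the end of 'seg': first against the
// committed portion, then against the reserved portion by growing the segment.
// *commit_failed_p is set when grow_heap_segment cannot commit the extra memory.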
12071 BOOL gc_heap::a_fit_segment_end_p (int gen_number,
12072                                    heap_segment* seg,
12073                                    size_t size, 
12074                                    alloc_context* acontext,
12075                                    int align_const,
12076                                    BOOL* commit_failed_p)
12077 {
12078     *commit_failed_p = FALSE;
12079     size_t limit = 0;
12080 #ifdef BACKGROUND_GC
12081     int cookie = -1;
12082 #endif //BACKGROUND_GC
12083
12084     uint8_t*& allocated = ((gen_number == 0) ?
12085                         alloc_allocated : 
12086                         heap_segment_allocated(seg));
12087
12088     size_t pad = Align (min_obj_size, align_const);
12089
12090 #ifdef FEATURE_LOH_COMPACTION
12091     if (gen_number == (max_generation + 1))
12092     {
12093         pad += Align (loh_padding_obj_size, align_const);
12094     }
12095 #endif //FEATURE_LOH_COMPACTION
12096
12097     uint8_t* end = heap_segment_committed (seg) - pad;
12098
12099     if (a_size_fit_p (size, allocated, end, align_const))
12100     {
12101         limit = limit_from_size (size, 
12102                                  (end - allocated), 
12103                                  gen_number, align_const);
12104         goto found_fit;
12105     }
12106
12107     end = heap_segment_reserved (seg) - pad;
12108
12109     if (a_size_fit_p (size, allocated, end, align_const))
12110     {
12111         limit = limit_from_size (size, 
12112                                  (end - allocated), 
12113                                  gen_number, align_const);
12114         if (grow_heap_segment (seg, allocated + limit))
12115         {
12116             goto found_fit;
12117         }
12118         else
12119         {
12120             dprintf (2, ("can't grow segment, doing a full gc"));
12121             *commit_failed_p = TRUE;
12122         }
12123     }
12124     goto found_no_fit;
12125
12126 found_fit:
12127
12128 #ifdef BACKGROUND_GC
12129     if (gen_number != 0)
12130     {
12131         cookie = bgc_alloc_lock->loh_alloc_set (allocated);
12132     }
12133 #endif //BACKGROUND_GC
12134
12135     uint8_t* old_alloc;
12136     old_alloc = allocated;
12137 #ifdef FEATURE_LOH_COMPACTION
12138     if (gen_number == (max_generation + 1))
12139     {
12140         size_t loh_pad = Align (loh_padding_obj_size, align_const);
12141         make_unused_array (old_alloc, loh_pad);
12142         old_alloc += loh_pad;
12143         allocated += loh_pad;
12144         limit -= loh_pad;
12145     }
12146 #endif //FEATURE_LOH_COMPACTION
12147
12148 #if defined (VERIFY_HEAP) && defined (_DEBUG)
12149         ((void**) allocated)[-1] = 0;     //clear the sync block
12150 #endif //VERIFY_HEAP && _DEBUG
12151     allocated += limit;
12152
12153     dprintf (3, ("found fit at end of seg: %Ix", old_alloc));
12154
12155 #ifdef BACKGROUND_GC
12156     if (cookie != -1)
12157     {
12158         bgc_loh_alloc_clr (old_alloc, limit, acontext, align_const, cookie, TRUE, seg);
12159     }
12160     else
12161 #endif //BACKGROUND_GC
12162     {
12163         adjust_limit_clr (old_alloc, limit, acontext, seg, align_const, gen_number);
12164     }
12165
12166     return TRUE;
12167
12168 found_no_fit:
12169
12170     return FALSE;
12171 }
12172
12173 BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
12174                                        size_t size, 
12175                                        alloc_context* acontext,
12176                                        int align_const,
12177                                        BOOL* commit_failed_p,
12178                                        oom_reason* oom_r)
12179 {
12180     *commit_failed_p = FALSE;
12181     heap_segment* seg = generation_allocation_segment (generation_of (gen_number));
12182     BOOL can_allocate_p = FALSE;
12183
12184     while (seg)
12185     {
12186         if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)), 
12187                                  acontext, align_const, commit_failed_p))
12188         {
12189             acontext->alloc_limit += Align (min_obj_size, align_const);
12190             can_allocate_p = TRUE;
12191             break;
12192         }
12193         else
12194         {
12195             if (*commit_failed_p)
12196             {
12197                 *oom_r = oom_cant_commit;
12198                 break;
12199             }
12200             else
12201             {
12202                 seg = heap_segment_next_rw (seg);
12203             }
12204         }
12205     }
12206
12207     return can_allocate_p;
12208 }
12209
12210 #ifdef BACKGROUND_GC
12211 inline
12212 void gc_heap::wait_for_background (alloc_wait_reason awr)
12213 {
12214     dprintf (2, ("BGC is already in progress, waiting for it to finish"));
12215     dprintf (SPINLOCK_LOG, ("[%d]Lmsl to wait for bgc done", heap_number));
12216     add_saved_spinlock_info (me_release, mt_wait_bgc);
12217     leave_spin_lock (&more_space_lock);
12218     background_gc_wait (awr);
12219     enter_spin_lock (&more_space_lock);
12220     add_saved_spinlock_info (me_acquire, mt_wait_bgc);
12221     dprintf (SPINLOCK_LOG, ("[%d]Emsl after waiting for bgc done", heap_number));
12222 }
12223
12224 void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr)
12225 {
12226     if (recursive_gc_sync::background_running_p())
12227     {
12228         uint32_t memory_load;
12229         get_memory_info (&memory_load);
12230         if (memory_load >= 95)
12231         {
12232             dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr));
12233             wait_for_background (awr);
12234         }
12235     }
12236 }
12237
12238 #endif //BACKGROUND_GC
12239
12240 // We request to trigger an ephemeral GC but we may get a full compacting GC.
12241 // Return TRUE if that's the case.
12242 BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr)
12243 {
12244 #ifdef BACKGROUND_GC
12245     wait_for_bgc_high_memory (awr_loh_oos_bgc);
12246 #endif //BACKGROUND_GC
12247
12248     BOOL did_full_compact_gc = FALSE;
12249
12250     dprintf (2, ("triggering a gen1 GC"));
12251     size_t last_full_compact_gc_count = get_full_compact_gc_count();
12252     vm_heap->GarbageCollectGeneration(max_generation - 1, gr);
12253
12254 #ifdef MULTIPLE_HEAPS
12255     enter_spin_lock (&more_space_lock);
12256     add_saved_spinlock_info (me_acquire, mt_t_eph_gc);
12257     dprintf (SPINLOCK_LOG, ("[%d]Emsl after a GC", heap_number));
12258 #endif //MULTIPLE_HEAPS
12259
12260     size_t current_full_compact_gc_count = get_full_compact_gc_count();
12261
12262     if (current_full_compact_gc_count > last_full_compact_gc_count)
12263     {
12264         dprintf (2, ("attempted to trigger an ephemeral GC and got a full compacting GC"));
12265         did_full_compact_gc = TRUE;
12266     }
12267
12268     return did_full_compact_gc;
12269 }
12270
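// SOH allocation attempt: the free lists are tried first, then the end of the
// ephemeral segment. When the caller passes short_seg_end_p, the segment end
// is only tried if the ephemeral segment is not already short on end space.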
12271 BOOL gc_heap::soh_try_fit (int gen_number,
12272                            size_t size, 
12273                            alloc_context* acontext,
12274                            int align_const,
12275                            BOOL* commit_failed_p,
12276                            BOOL* short_seg_end_p)
12277 {
12278     BOOL can_allocate = TRUE;
12279     if (short_seg_end_p)
12280     {
12281         *short_seg_end_p = FALSE;
12282     }
12283
12284     can_allocate = a_fit_free_list_p (gen_number, size, acontext, align_const);
12285     if (!can_allocate)
12286     {
12287         if (short_seg_end_p)
12288         {
12289             *short_seg_end_p = short_on_end_of_seg (gen_number, ephemeral_heap_segment, align_const);
12290         }
12291         // If the caller doesn't care, we always try to fit at the end of seg;
12292         // otherwise we would only try if we are actually not short at end of seg.
12293         if (!short_seg_end_p || !(*short_seg_end_p))
12294         {
12295             can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size, 
12296                                                 acontext, align_const, commit_failed_p);
12297         }
12298     }
12299
12300     return can_allocate;
12301 }
12302
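// Small object allocation is driven by a small state machine: each iteration
// tries to fit the request and, on failure, escalates to waiting for a
// background GC, triggering an ephemeral GC, or triggering a full compacting
// GC, until it reaches a_state_can_allocate or a_state_cant_allocate (which
// triggers handle_oom before returning).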
12303 BOOL gc_heap::allocate_small (int gen_number,
12304                               size_t size, 
12305                               alloc_context* acontext,
12306                               int align_const)
12307 {
12308 #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
12309     if (recursive_gc_sync::background_running_p())
12310     {
12311         background_soh_alloc_count++;
12312         if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0)
12313         {
12314             add_saved_spinlock_info (me_release, mt_alloc_small);
12315             dprintf (SPINLOCK_LOG, ("[%d]spin Lmsl", heap_number));
12316             leave_spin_lock (&more_space_lock);
12317             bool cooperative_mode = enable_preemptive ();
12318             GCToOSInterface::Sleep (bgc_alloc_spin);
12319             disable_preemptive (cooperative_mode);
12320             enter_spin_lock (&more_space_lock);
12321             add_saved_spinlock_info (me_acquire, mt_alloc_small);
12322             dprintf (SPINLOCK_LOG, ("[%d]spin Emsl", heap_number));
12323         }
12324         else
12325         {
12326             //GCToOSInterface::YieldThread (0);
12327         }
12328     }
12329 #endif //BACKGROUND_GC && !MULTIPLE_HEAPS
12330
12331     gc_reason gr = reason_oos_soh;
12332     oom_reason oom_r = oom_no_failure;
12333
12334     // No variable values should be "carried over" from one state to the other.
12335     // That's why there are local variables for each state.
12336
12337     allocation_state soh_alloc_state = a_state_start;
12338
12339     // If we can get a new seg it means allocation will succeed.
12340     while (1)
12341     {
12342         dprintf (3, ("[h%d]soh state is %s", heap_number, allocation_state_str[soh_alloc_state]));
12343         switch (soh_alloc_state)
12344         {
12345             case a_state_can_allocate:
12346             case a_state_cant_allocate:
12347             {
12348                 goto exit;
12349             }
12350             case a_state_start:
12351             {
12352                 soh_alloc_state = a_state_try_fit;
12353                 break;
12354             }
12355             case a_state_try_fit:
12356             {
12357                 BOOL commit_failed_p = FALSE;
12358                 BOOL can_use_existing_p = FALSE;
12359
12360                 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12361                                                   align_const, &commit_failed_p,
12362                                                   NULL);
12363                 soh_alloc_state = (can_use_existing_p ?
12364                                         a_state_can_allocate : 
12365                                         (commit_failed_p ? 
12366                                             a_state_trigger_full_compact_gc :
12367                                             a_state_trigger_ephemeral_gc));
12368                 break;
12369             }
12370             case a_state_try_fit_after_bgc:
12371             {
12372                 BOOL commit_failed_p = FALSE;
12373                 BOOL can_use_existing_p = FALSE;
12374                 BOOL short_seg_end_p = FALSE;
12375
12376                 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12377                                                   align_const, &commit_failed_p,
12378                                                   &short_seg_end_p);
12379                 soh_alloc_state = (can_use_existing_p ? 
12380                                         a_state_can_allocate : 
12381                                         (short_seg_end_p ? 
12382                                             a_state_trigger_2nd_ephemeral_gc : 
12383                                             a_state_trigger_full_compact_gc));
12384                 break;
12385             }
12386             case a_state_try_fit_after_cg:
12387             {
12388                 BOOL commit_failed_p = FALSE;
12389                 BOOL can_use_existing_p = FALSE;
12390                 BOOL short_seg_end_p = FALSE;
12391
12392                 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12393                                                   align_const, &commit_failed_p,
12394                                                   &short_seg_end_p);
12395                 if (short_seg_end_p)
12396                 {
12397                     soh_alloc_state = a_state_cant_allocate;
12398                     oom_r = oom_budget;
12399                 }
12400                 else
12401                 {
12402                     if (can_use_existing_p)
12403                     {
12404                         soh_alloc_state = a_state_can_allocate;
12405                     }
12406                     else
12407                     {
12408 #ifdef MULTIPLE_HEAPS
12409                         if (!commit_failed_p)
12410                         {
12411                             // some other threads already grabbed the more space lock and allocated
12412                             // so we should attempt an ephemeral GC again.
12413                             assert (heap_segment_allocated (ephemeral_heap_segment) < alloc_allocated);
12414                             soh_alloc_state = a_state_trigger_ephemeral_gc; 
12415                         }
12416                         else
12417 #endif //MULTIPLE_HEAPS
12418                         {
12419                             assert (commit_failed_p);
12420                             soh_alloc_state = a_state_cant_allocate;
12421                             oom_r = oom_cant_commit;
12422                         }
12423                     }
12424                 }
12425                 break;
12426             }
12427             case a_state_check_and_wait_for_bgc:
12428             {
12429                 BOOL bgc_in_progress_p = FALSE;
12430                 BOOL did_full_compacting_gc = FALSE;
12431
12432                 bgc_in_progress_p = check_and_wait_for_bgc (awr_gen0_oos_bgc, &did_full_compacting_gc);
12433                 soh_alloc_state = (did_full_compacting_gc ? 
12434                                         a_state_try_fit_after_cg : 
12435                                         a_state_try_fit_after_bgc);
12436                 break;
12437             }
12438             case a_state_trigger_ephemeral_gc:
12439             {
12440                 BOOL commit_failed_p = FALSE;
12441                 BOOL can_use_existing_p = FALSE;
12442                 BOOL short_seg_end_p = FALSE;
12443                 BOOL bgc_in_progress_p = FALSE;
12444                 BOOL did_full_compacting_gc = FALSE;
12445
12446                 did_full_compacting_gc = trigger_ephemeral_gc (gr);
12447                 if (did_full_compacting_gc)
12448                 {
12449                     soh_alloc_state = a_state_try_fit_after_cg;
12450                 }
12451                 else
12452                 {
12453                     can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12454                                                       align_const, &commit_failed_p,
12455                                                       &short_seg_end_p);
12456 #ifdef BACKGROUND_GC
12457                     bgc_in_progress_p = recursive_gc_sync::background_running_p();
12458 #endif //BACKGROUND_GC
12459
12460                     if (short_seg_end_p)
12461                     {
12462                         soh_alloc_state = (bgc_in_progress_p ? 
12463                                                 a_state_check_and_wait_for_bgc : 
12464                                                 a_state_trigger_full_compact_gc);
12465
12466                         if (fgn_maxgen_percent)
12467                         {
12468                             dprintf (2, ("FGN: doing last GC before we throw OOM"));
12469                             send_full_gc_notification (max_generation, FALSE);
12470                         }
12471                     }
12472                     else
12473                     {
12474                         if (can_use_existing_p)
12475                         {
12476                             soh_alloc_state = a_state_can_allocate;
12477                         }
12478                         else
12479                         {
12480 #ifdef MULTIPLE_HEAPS
12481                             if (!commit_failed_p)
12482                             {
12483                                 // some other threads already grabbed the more space lock and allocated
12484                                 // so we should attempt an ephemeral GC again.
12485                                 assert (heap_segment_allocated (ephemeral_heap_segment) < alloc_allocated);
12486                                 soh_alloc_state = a_state_trigger_ephemeral_gc;
12487                             }
12488                             else
12489 #endif //MULTIPLE_HEAPS
12490                             {
12491                                 soh_alloc_state = a_state_trigger_full_compact_gc;
12492                                 if (fgn_maxgen_percent)
12493                                 {
12494                                     dprintf (2, ("FGN: failed to commit, doing full compacting GC"));
12495                                     send_full_gc_notification (max_generation, FALSE);
12496                                 }
12497                             }
12498                         }
12499                     }
12500                 }
12501                 break;
12502             }
12503             case a_state_trigger_2nd_ephemeral_gc:
12504             {
12505                 BOOL commit_failed_p = FALSE;
12506                 BOOL can_use_existing_p = FALSE;
12507                 BOOL short_seg_end_p = FALSE;
12508                 BOOL did_full_compacting_gc = FALSE;
12509
12510
12511                 did_full_compacting_gc = trigger_ephemeral_gc (gr);
12512                 
12513                 if (did_full_compacting_gc)
12514                 {
12515                     soh_alloc_state = a_state_try_fit_after_cg;
12516                 }
12517                 else
12518                 {
12519                     can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12520                                                       align_const, &commit_failed_p,
12521                                                       &short_seg_end_p);
12522                     if (short_seg_end_p || commit_failed_p)
12523                     {
12524                         soh_alloc_state = a_state_trigger_full_compact_gc;
12525                     }
12526                     else
12527                     {
12528                         assert (can_use_existing_p);
12529                         soh_alloc_state = a_state_can_allocate;
12530                     }
12531                 }
12532                 break;
12533             }
12534             case a_state_trigger_full_compact_gc:
12535             {
12536                 BOOL got_full_compacting_gc = FALSE;
12537
12538                 got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r);
12539                 soh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
12540                 break;
12541             }
12542             default:
12543             {
12544                 assert (!"Invalid state!");
12545                 break;
12546             }
12547         }
12548     }
12549
12550 exit:
12551     if (soh_alloc_state == a_state_cant_allocate)
12552     {
12553         assert (oom_r != oom_no_failure);
12554         handle_oom (heap_number, 
12555                     oom_r, 
12556                     size,
12557                     heap_segment_allocated (ephemeral_heap_segment),
12558                     heap_segment_reserved (ephemeral_heap_segment));
12559
12560         dprintf (SPINLOCK_LOG, ("[%d]Lmsl for oom", heap_number));
12561         add_saved_spinlock_info (me_release, mt_alloc_small_cant);
12562         leave_spin_lock (&more_space_lock);
12563     }
12564
12565     return (soh_alloc_state == a_state_can_allocate);
12566 }
12567
12568 #ifdef BACKGROUND_GC
12569 inline
12570 void gc_heap::wait_for_background_planning (alloc_wait_reason awr)
12571 {
12572     while (current_c_gc_state == c_gc_state_planning)
12573     {
12574         dprintf (3, ("lh state planning, cannot allocate"));
12575
12576         dprintf (SPINLOCK_LOG, ("[%d]Lmsl to wait for bgc plan", heap_number));
12577         add_saved_spinlock_info (me_release, mt_wait_bgc_plan);
12578         leave_spin_lock (&more_space_lock);
12579         background_gc_wait_lh (awr);
12580         enter_spin_lock (&more_space_lock);
12581         add_saved_spinlock_info (me_acquire, mt_wait_bgc_plan);
12582         dprintf (SPINLOCK_LOG, ("[%d]Emsl after waiting for bgc plan", heap_number));
12583     }
12584     assert ((current_c_gc_state == c_gc_state_free) ||
12585             (current_c_gc_state == c_gc_state_marking));
12586 }
12587
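// Decides whether LOH allocation should proceed while a background GC is in progress.
// Small LOH heaps (begin size + growth under 10x the LOH min size) can always allocate.
// Otherwise we refuse if the LOH was already large when the BGC started (begin size at
// least 2x end_loh_size) or if it has grown by more than its begin size since the BGC
// started; else we allow the allocation and record the relative growth (0..10) in
// bgc_alloc_spin_loh.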
12588 BOOL gc_heap::bgc_loh_should_allocate()
12589 {
12590     size_t min_gc_size = dd_min_size(dynamic_data_of (max_generation + 1));
12591
12592     if ((bgc_begin_loh_size + bgc_loh_size_increased) < (min_gc_size * 10))
12593     {
12594         return TRUE;
12595     }
12596
12597     if (((bgc_begin_loh_size / end_loh_size) >= 2) || (bgc_loh_size_increased >= bgc_begin_loh_size))
12598     {
12599         if ((bgc_begin_loh_size / end_loh_size) > 2)
12600         {
12601             dprintf (3, ("alloc-ed too much before bgc started"));
12602         }
12603         else
12604         {
12605             dprintf (3, ("alloc-ed too much after bgc started"));
12606         }
12607         return FALSE;
12608     }
12609     else
12610     {
12611         bgc_alloc_spin_loh = (uint32_t)(((float)bgc_loh_size_increased / (float)bgc_begin_loh_size) * 10);
12612         return TRUE;
12613     }
12614 }
12615 #endif //BACKGROUND_GC
12616
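// Computes the size of a new LOH segment needed for a request of 'size' bytes: the
// request plus room for two min-size objects and a page, rounded up to the alignment
// granularity (the default LOH segment size, or half of it without SEG_MAPPING_TABLE),
// never smaller than min_loh_segment_size, and page aligned.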
12617 size_t gc_heap::get_large_seg_size (size_t size)
12618 {
12619     size_t default_seg_size = min_loh_segment_size;
12620 #ifdef SEG_MAPPING_TABLE
12621     size_t align_size =  default_seg_size;
12622 #else //SEG_MAPPING_TABLE
12623     size_t align_size =  default_seg_size / 2;
12624 #endif //SEG_MAPPING_TABLE
12625     int align_const = get_alignment_constant (FALSE);
12626     size_t large_seg_size = align_on_page (
12627         max (default_seg_size,
12628             ((size + 2 * Align(min_obj_size, align_const) + OS_PAGE_SIZE +
12629             align_size) / align_size * align_size)));
12630     return large_seg_size;
12631 }
12632
12633 BOOL gc_heap::loh_get_new_seg (generation* gen,
12634                                size_t size,
12635                                int align_const,
12636                                BOOL* did_full_compact_gc,
12637                                oom_reason* oom_r)
12638 {
12639     UNREFERENCED_PARAMETER(gen);
12640     UNREFERENCED_PARAMETER(align_const);
12641
12642     *did_full_compact_gc = FALSE;
12643
12644     size_t seg_size = get_large_seg_size (size);
12645
12646     heap_segment* new_seg = get_large_segment (seg_size, did_full_compact_gc);
12647
12648     if (new_seg)
12649     {
12650         loh_alloc_since_cg += seg_size;
12651     }
12652     else
12653     {
12654         *oom_r = oom_loh;
12655     }
12656
12657     return (new_seg != 0);
12658 }
12659
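// Returns TRUE if enough LOH allocation has happened since the last full compacting GC
// (at least twice the segment size this request would need, either on this heap or
// summed across all heaps under MULTIPLE_HEAPS) for another full compacting GC to be
// worth retrying.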
12660 BOOL gc_heap::retry_full_compact_gc (size_t size)
12661 {
12662     size_t seg_size = get_large_seg_size (size);
12663
12664     if (loh_alloc_since_cg >= (2 * (uint64_t)seg_size))
12665     {
12666         return TRUE;
12667     }
12668
12669 #ifdef MULTIPLE_HEAPS
12670     uint64_t total_alloc_size = 0;
12671     for (int i = 0; i < n_heaps; i++)
12672     {
12673         total_alloc_size += g_heaps[i]->loh_alloc_since_cg;
12674     }
12675
12676     if (total_alloc_size >= (2 * (uint64_t)seg_size))
12677     {
12678         return TRUE;
12679     }
12680 #endif //MULTIPLE_HEAPS
12681
12682     return FALSE;
12683 }
12684
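// If a background GC is running, waits for it and reports via did_full_compact_gc
// whether a full compacting GC happened in the meantime. Returns TRUE if a BGC was
// in progress.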
12685 BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
12686                                       BOOL* did_full_compact_gc)
12687 {
12688     BOOL bgc_in_progress = FALSE;
12689     *did_full_compact_gc = FALSE;
12690 #ifdef BACKGROUND_GC
12691     if (recursive_gc_sync::background_running_p())
12692     {
12693         bgc_in_progress = TRUE;
12694         size_t last_full_compact_gc_count = get_full_compact_gc_count();
12695         wait_for_background (awr);
12696         size_t current_full_compact_gc_count = get_full_compact_gc_count();
12697         if (current_full_compact_gc_count > last_full_compact_gc_count)
12698         {
12699             *did_full_compact_gc = TRUE;
12700         }
12701     }
12702 #endif //BACKGROUND_GC
12703
12704     return bgc_in_progress;
12705 }
12706
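// Tries to satisfy a LOH request from the free list first, then from the end of an
// existing LOH segment. While a BGC is running we also track how much was allocated
// from free space vs. at segment ends (bgc_loh_allocated_in_free /
// bgc_loh_size_increased) so bgc_loh_should_allocate can throttle LOH allocations.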
12707 BOOL gc_heap::loh_try_fit (int gen_number,
12708                            size_t size, 
12709                            alloc_context* acontext,
12710                            int align_const,
12711                            BOOL* commit_failed_p,
12712                            oom_reason* oom_r)
12713 {
12714     BOOL can_allocate = TRUE;
12715
12716     if (!a_fit_free_list_large_p (size, acontext, align_const))
12717     {
12718         can_allocate = loh_a_fit_segment_end_p (gen_number, size, 
12719                                                 acontext, align_const, 
12720                                                 commit_failed_p, oom_r);
12721
12722 #ifdef BACKGROUND_GC
12723         if (can_allocate && recursive_gc_sync::background_running_p())
12724         {
12725             bgc_loh_size_increased += size;
12726         }
12727 #endif //BACKGROUND_GC
12728     }
12729 #ifdef BACKGROUND_GC
12730     else
12731     {
12732         if (recursive_gc_sync::background_running_p())
12733         {
12734             bgc_loh_allocated_in_free += size;
12735         }
12736     }
12737 #endif //BACKGROUND_GC
12738
12739     return can_allocate;
12740 }
12741
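// Triggers a blocking full compacting GC on the OOM path: set last_gc_before_oom so the
// next GC compacts, wait for any in-flight BGC first (if a full compacting GC already
// happened while we waited we are done), otherwise collect max_generation. If the
// elevation logic kept us from getting the full GC we report oom_unproductive_full_gc
// and return FALSE.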
12742 BOOL gc_heap::trigger_full_compact_gc (gc_reason gr, 
12743                                        oom_reason* oom_r)
12744 {
12745     BOOL did_full_compact_gc = FALSE;
12746
12747     size_t last_full_compact_gc_count = get_full_compact_gc_count();
12748
12749     // Set this so the next GC will be a full compacting GC.
12750     if (!last_gc_before_oom)
12751     {
12752         last_gc_before_oom = TRUE;
12753     }
12754
12755 #ifdef BACKGROUND_GC
12756     if (recursive_gc_sync::background_running_p())
12757     {
12758         wait_for_background ((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc);
12759         dprintf (2, ("waited for BGC - done"));
12760     }
12761 #endif //BACKGROUND_GC
12762
12763     size_t current_full_compact_gc_count = get_full_compact_gc_count();
12764     if (current_full_compact_gc_count > last_full_compact_gc_count)
12765     {
12766         dprintf (3, ("a full compacting GC triggered while waiting for BGC (%d->%d)", last_full_compact_gc_count, current_full_compact_gc_count));
12767         assert (current_full_compact_gc_count > last_full_compact_gc_count);
12768         did_full_compact_gc = TRUE;
12769         goto exit;
12770     }
12771
12772     dprintf (3, ("h%d full GC", heap_number));
12773     vm_heap->GarbageCollectGeneration(max_generation, gr);
12774
12775 #ifdef MULTIPLE_HEAPS
12776     enter_spin_lock (&more_space_lock);
12777     dprintf (SPINLOCK_LOG, ("[%d]Emsl after full gc", heap_number));
12778     add_saved_spinlock_info (me_acquire, mt_t_full_gc);
12779 #endif //MULTIPLE_HEAPS
12780
12781     current_full_compact_gc_count = get_full_compact_gc_count();
12782
12783     if (current_full_compact_gc_count == last_full_compact_gc_count)
12784     {
12785         dprintf (2, ("attempted to trigger a full compacting GC but didn't get it"));
12786         // We requested a full GC but didn't get one because of the elevation logic,
12787         // which means we should fail.
12788         *oom_r = oom_unproductive_full_gc;
12789     }
12790     else
12791     {
12792         dprintf (3, ("h%d: T full compacting GC (%d->%d)", 
12793             heap_number, 
12794             last_full_compact_gc_count, 
12795             current_full_compact_gc_count));
12796
12797         assert (current_full_compact_gc_count > last_full_compact_gc_count);
12798         did_full_compact_gc = TRUE;
12799     }
12800
12801 exit:
12802     return did_full_compact_gc;
12803 }
12804
12805 #ifdef RECORD_LOH_STATE
12806 void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id)
12807 {
12808     // When the state is can_allocate we have already released the more space
12809     // lock, so we don't record that state here since this code is not
12810     // thread safe.
12811     if (loh_state_to_save != a_state_can_allocate)
12812     {
12813         last_loh_states[loh_state_index].alloc_state = loh_state_to_save;
12814         last_loh_states[loh_state_index].thread_id = thread_id;
12815         loh_state_index++;
12816
12817         if (loh_state_index == max_saved_loh_states)
12818         {
12819             loh_state_index = 0;
12820         }
12821
12822         assert (loh_state_index < max_saved_loh_states);
12823     }
12824 }
12825 #endif //RECORD_LOH_STATE
12826
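// LOH allocation slow path, written as a state machine over the same allocation_state
// values the SOH path uses: try to fit in existing space, acquire a new segment, wait
// for a BGC or trigger a full compacting GC, until we reach a_state_can_allocate or
// a_state_cant_allocate (which raises OOM on exit). While a BGC is running we may yield
// or wait before even starting, based on bgc_loh_should_allocate.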
12827 BOOL gc_heap::allocate_large (int gen_number,
12828                               size_t size, 
12829                               alloc_context* acontext,
12830                               int align_const)
12831 {
12832 #ifdef BACKGROUND_GC
12833     if (recursive_gc_sync::background_running_p() && (current_c_gc_state != c_gc_state_planning))
12834     {
12835         background_loh_alloc_count++;
12836         //if ((background_loh_alloc_count % bgc_alloc_spin_count_loh) == 0)
12837         {
12838             if (bgc_loh_should_allocate())
12839             {
12840                 if (!bgc_alloc_spin_loh)
12841                 {
12842                     add_saved_spinlock_info (me_release, mt_alloc_large);
12843                     dprintf (SPINLOCK_LOG, ("[%d]spin Lmsl loh", heap_number));
12844                     leave_spin_lock (&more_space_lock);
12845                     bool cooperative_mode = enable_preemptive ();
12846                     GCToOSInterface::YieldThread (bgc_alloc_spin_loh);
12847                     disable_preemptive (cooperative_mode);
12848                     enter_spin_lock (&more_space_lock);
12849                     add_saved_spinlock_info (me_acquire, mt_alloc_large);
12850                     dprintf (SPINLOCK_LOG, ("[%d]spin Emsl loh", heap_number));
12851                 }
12852             }
12853             else
12854             {
12855                 wait_for_background (awr_loh_alloc_during_bgc);
12856             }
12857         }
12858     }
12859 #endif //BACKGROUND_GC
12860
12861     gc_reason gr = reason_oos_loh;
12862     generation* gen = generation_of (gen_number);
12863     oom_reason oom_r = oom_no_failure;
12864     size_t current_full_compact_gc_count = 0;
12865
12866     // No variable values should be "carried over" from one state to the other;
12867     // that's why there are local variables for each state.
12868     allocation_state loh_alloc_state = a_state_start;
12869 #ifdef RECORD_LOH_STATE
12870     EEThreadId current_thread_id;
12871     current_thread_id.SetToCurrentThread();
12872 #endif //RECORD_LOH_STATE
12873
12874     // If we can get a new seg it means allocation will succeed.
12875     while (1)
12876     {
12877         dprintf (3, ("[h%d]loh state is %s", heap_number, allocation_state_str[loh_alloc_state]));
12878
12879 #ifdef RECORD_LOH_STATE
12880         add_saved_loh_state (loh_alloc_state, current_thread_id);
12881 #endif //RECORD_LOH_STATE
12882         switch (loh_alloc_state)
12883         {
12884             case a_state_can_allocate:
12885             case a_state_cant_allocate:
12886             {
12887                 goto exit;
12888             }
12889             case a_state_start:
12890             {
12891                 loh_alloc_state = a_state_try_fit;
12892                 break;
12893             }
12894             case a_state_try_fit:
12895             {
12896                 BOOL commit_failed_p = FALSE;
12897                 BOOL can_use_existing_p = FALSE;
12898
12899                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
12900                                                   align_const, &commit_failed_p, &oom_r);
12901                 loh_alloc_state = (can_use_existing_p ?
12902                                         a_state_can_allocate : 
12903                                         (commit_failed_p ? 
12904                                             a_state_trigger_full_compact_gc :
12905                                             a_state_acquire_seg));
12906                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
12907                 break;
12908             }
12909             case a_state_try_fit_new_seg:
12910             {
12911                 BOOL commit_failed_p = FALSE;
12912                 BOOL can_use_existing_p = FALSE;
12913
12914                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
12915                                                   align_const, &commit_failed_p, &oom_r);
12916                 // Even after we got a new seg it doesn't necessarily mean we can allocate;
12917                 // another LOH allocating thread could have beaten us to acquiring the msl, so
12918                 // we need to try again.
12919                 loh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_try_fit);
12920                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
12921                 break;
12922             }
12923             case a_state_try_fit_new_seg_after_cg:
12924             {
12925                 BOOL commit_failed_p = FALSE;
12926                 BOOL can_use_existing_p = FALSE;
12927
12928                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
12929                                                   align_const, &commit_failed_p, &oom_r);
12930                 // Even after we got a new seg it doesn't necessarily mean we can allocate;
12931                 // another LOH allocating thread could have beaten us to acquiring the msl, so
12932                 // we need to try again. However, if we failed to commit (which means we
12933                 // did have space on the seg), we bail right away because we already did a
12934                 // full compacting GC.
12935                 loh_alloc_state = (can_use_existing_p ? 
12936                                         a_state_can_allocate : 
12937                                         (commit_failed_p ? 
12938                                             a_state_cant_allocate :
12939                                             a_state_acquire_seg_after_cg));
12940                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
12941                 break;
12942             }
12943             case a_state_try_fit_no_seg:
12944             {
12945                 BOOL commit_failed_p = FALSE;
12946                 BOOL can_use_existing_p = FALSE;
12947
12948                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
12949                                                   align_const, &commit_failed_p, &oom_r);
12950                 loh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_cant_allocate);
12951                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
12952                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
12953                 break;
12954             }
12955             case a_state_try_fit_after_cg:
12956             {
12957                 BOOL commit_failed_p = FALSE;
12958                 BOOL can_use_existing_p = FALSE;
12959
12960                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
12961                                                   align_const, &commit_failed_p, &oom_r);
12962                 loh_alloc_state = (can_use_existing_p ?
12963                                         a_state_can_allocate : 
12964                                         (commit_failed_p ? 
12965                                             a_state_cant_allocate :
12966                                             a_state_acquire_seg_after_cg));
12967                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
12968                 break;
12969             }
12970             case a_state_try_fit_after_bgc:
12971             {
12972                 BOOL commit_failed_p = FALSE;
12973                 BOOL can_use_existing_p = FALSE;
12974
12975                 can_use_existing_p = loh_try_fit (gen_number, size, acontext, 
12976                                                   align_const, &commit_failed_p, &oom_r);
12977                 loh_alloc_state = (can_use_existing_p ?
12978                                         a_state_can_allocate : 
12979                                         (commit_failed_p ? 
12980                                             a_state_trigger_full_compact_gc :
12981                                             a_state_acquire_seg_after_bgc));
12982                 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
12983                 break;
12984             }
12985             case a_state_acquire_seg:
12986             {
12987                 BOOL can_get_new_seg_p = FALSE;
12988                 BOOL did_full_compacting_gc = FALSE;
12989
12990                 current_full_compact_gc_count = get_full_compact_gc_count();
12991
12992                 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
12993                 loh_alloc_state = (can_get_new_seg_p ? 
12994                                         a_state_try_fit_new_seg : 
12995                                         (did_full_compacting_gc ? 
12996                                             a_state_check_retry_seg :
12997                                             a_state_check_and_wait_for_bgc));
12998                 break;
12999             }
13000             case a_state_acquire_seg_after_cg:
13001             {
13002                 BOOL can_get_new_seg_p = FALSE;
13003                 BOOL did_full_compacting_gc = FALSE;
13004
13005                 current_full_compact_gc_count = get_full_compact_gc_count();
13006
13007                 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
13008                 // Since we release the msl before we try to allocate a seg, other
13009                 // threads could have allocated a bunch of segments before us so
13010                 // we might need to retry.
13011                 loh_alloc_state = (can_get_new_seg_p ? 
13012                                         a_state_try_fit_new_seg_after_cg : 
13013                                         a_state_check_retry_seg);
13014                 break;
13015             }
13016             case a_state_acquire_seg_after_bgc:
13017             {
13018                 BOOL can_get_new_seg_p = FALSE;
13019                 BOOL did_full_compacting_gc = FALSE;
13020              
13021                 current_full_compact_gc_count = get_full_compact_gc_count();
13022
13023                 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r); 
13024                 loh_alloc_state = (can_get_new_seg_p ? 
13025                                         a_state_try_fit_new_seg : 
13026                                         (did_full_compacting_gc ? 
13027                                             a_state_check_retry_seg :
13028                                             a_state_trigger_full_compact_gc));
13029                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13030                 break;
13031             }
13032             case a_state_check_and_wait_for_bgc:
13033             {
13034                 BOOL bgc_in_progress_p = FALSE;
13035                 BOOL did_full_compacting_gc = FALSE;
13036
13037                 if (fgn_maxgen_percent)
13038                 {
13039                     dprintf (2, ("FGN: failed to acquire seg, may need to do a full blocking GC"));
13040                     send_full_gc_notification (max_generation, FALSE);
13041                 }
13042
13043                 bgc_in_progress_p = check_and_wait_for_bgc (awr_loh_oos_bgc, &did_full_compacting_gc);
13044                 loh_alloc_state = (!bgc_in_progress_p ?
13045                                         a_state_trigger_full_compact_gc : 
13046                                         (did_full_compacting_gc ? 
13047                                             a_state_try_fit_after_cg :
13048                                             a_state_try_fit_after_bgc));
13049                 break;
13050             }
13051             case a_state_trigger_full_compact_gc:
13052             {
13053                 BOOL got_full_compacting_gc = FALSE;
13054
13055                 got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r);
13056                 loh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
13057                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13058                 break;
13059             }
13060             case a_state_check_retry_seg:
13061             {
13062                 BOOL should_retry_gc = retry_full_compact_gc (size);
13063                 BOOL should_retry_get_seg = FALSE;
13064                 if (!should_retry_gc)
13065                 {
13066                     size_t last_full_compact_gc_count = current_full_compact_gc_count;
13067                     current_full_compact_gc_count = get_full_compact_gc_count();
13068
13069                     if (current_full_compact_gc_count > (last_full_compact_gc_count + 1))
13070                     {
13071                         should_retry_get_seg = TRUE;
13072                     }
13073                 }
13074     
13075                 loh_alloc_state = (should_retry_gc ? 
13076                                         a_state_trigger_full_compact_gc : 
13077                                         (should_retry_get_seg ?
13078                                             a_state_acquire_seg_after_cg :
13079                                             a_state_cant_allocate));
13080                 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13081                 break;
13082             }
13083             default:
13084             {
13085                 assert (!"Invalid state!");
13086                 break;
13087             }
13088         }
13089     }
13090
13091 exit:
13092     if (loh_alloc_state == a_state_cant_allocate)
13093     {
13094         assert (oom_r != oom_no_failure);
13095         handle_oom (heap_number, 
13096                     oom_r, 
13097                     size,
13098                     0,
13099                     0);
13100
13101         add_saved_spinlock_info (me_release, mt_alloc_large_cant);
13102         dprintf (SPINLOCK_LOG, ("[%d]Lmsl for loh oom", heap_number));
13103         leave_spin_lock (&more_space_lock);
13104     }
13105
13106     return (loh_alloc_state == a_state_can_allocate);
13107 }
13108
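// Per-heap allocation slow path: takes the more space lock, enforces the allocation
// budget (possibly triggering a GC when the budget is exhausted), then dispatches to
// allocate_small or allocate_large and fires the allocation tick event once enough
// bytes have accumulated. Returns 1 on success, 0 on failure, and -1 if the caller
// should wait out an in-progress GC and retry.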
13109 int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
13110                                    int gen_number)
13111 {
13112     if (gc_heap::gc_started)
13113     {
13114         wait_for_gc_done();
13115         return -1;
13116     }
13117
13118 #ifdef SYNCHRONIZATION_STATS
13119     int64_t msl_acquire_start = GCToOSInterface::QueryPerformanceCounter();
13120 #endif //SYNCHRONIZATION_STATS
13121     enter_spin_lock (&more_space_lock);
13122     add_saved_spinlock_info (me_acquire, mt_try_alloc);
13123     dprintf (SPINLOCK_LOG, ("[%d]Emsl for alloc", heap_number));
13124 #ifdef SYNCHRONIZATION_STATS
13125     int64_t msl_acquire = GCToOSInterface::QueryPerformanceCounter() - msl_acquire_start;
13126     total_msl_acquire += msl_acquire;
13127     num_msl_acquired++;
13128     if (msl_acquire > 200)
13129     {
13130         num_high_msl_acquire++;
13131     }
13132     else
13133     {
13134         num_low_msl_acquire++;
13135     }
13136 #endif //SYNCHRONIZATION_STATS
13137
13138     /*
13139     // We are commenting this out 'cause we don't see the point - we already
13140     // have checked gc_started when we were acquiring the msl - no need to check
13141     // again. This complicates the logic in bgc_suspend_EE 'cause that one would
13142     // need to release msl which causes all sorts of trouble.
13143     if (gc_heap::gc_started)
13144     {
13145 #ifdef SYNCHRONIZATION_STATS
13146         good_suspension++;
13147 #endif //SYNCHRONIZATION_STATS
13148         BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0;
13149         if (!fStress)
13150         {
13151             //Rendez vous early (MP scaling issue)
13152             //dprintf (1, ("[%d]waiting for gc", heap_number));
13153             wait_for_gc_done();
13154 #ifdef MULTIPLE_HEAPS
13155             return -1;
13156 #endif //MULTIPLE_HEAPS
13157         }
13158     }
13159     */
13160
13161     dprintf (3, ("requested to allocate %d bytes on gen%d", size, gen_number));
13162
13163     int align_const = get_alignment_constant (gen_number != (max_generation+1));
13164
13165     if (fgn_maxgen_percent)
13166     {
13167         check_for_full_gc (gen_number, size);
13168     }
13169
13170     if (!(new_allocation_allowed (gen_number)))
13171     {
13172         if (fgn_maxgen_percent && (gen_number == 0))
13173         {
13174             // We only check gen0 every so often, so take this opportunity to check again.
13175             check_for_full_gc (gen_number, size);
13176         }
13177
13178 #ifdef BACKGROUND_GC
13179         wait_for_bgc_high_memory (awr_gen0_alloc);
13180 #endif //BACKGROUND_GC
13181
13182 #ifdef SYNCHRONIZATION_STATS
13183         bad_suspension++;
13184 #endif //SYNCHRONIZATION_STATS
13185         dprintf (/*100*/ 2, ("running out of budget on gen%d, gc", gen_number));
13186
13187         if (!settings.concurrent || (gen_number == 0))
13188         {
13189             vm_heap->GarbageCollectGeneration (0, ((gen_number == 0) ? reason_alloc_soh : reason_alloc_loh));
13190 #ifdef MULTIPLE_HEAPS
13191             enter_spin_lock (&more_space_lock);
13192             add_saved_spinlock_info (me_acquire, mt_try_budget);
13193             dprintf (SPINLOCK_LOG, ("[%d]Emsl out budget", heap_number));
13194 #endif //MULTIPLE_HEAPS
13195         }
13196     }
13197
13198     BOOL can_allocate = ((gen_number == 0) ?
13199         allocate_small (gen_number, size, acontext, align_const) :
13200         allocate_large (gen_number, size, acontext, align_const));
13201    
13202     if (can_allocate)
13203     {
13204         size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
13205         int etw_allocation_index = ((gen_number == 0) ? 0 : 1);
13206
13207         etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
13208
13209
13210         if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
13211         {
13212 #ifdef FEATURE_REDHAWK
13213             FIRE_EVENT(GCAllocationTick_V1, (uint32_t)etw_allocation_running_amount[etw_allocation_index],
13214                                             (gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh);
13215 #else
13216             // Unfortunately some of the ETW macros do not check whether the ETW feature is enabled.
13217             // The ones that do are much less efficient.
13218 #if defined(FEATURE_EVENT_TRACE)
13219             if (EVENT_ENABLED(GCAllocationTick_V3))
13220             {
13221                 fire_etw_allocation_event (etw_allocation_running_amount[etw_allocation_index], gen_number, acontext->alloc_ptr);
13222             }
13223 #endif //FEATURE_EVENT_TRACE
13224 #endif //FEATURE_REDHAWK
13225             etw_allocation_running_amount[etw_allocation_index] = 0;
13226         }
13227     }
13228
13229     return (int)can_allocate;
13230 }
13231
13232 #ifdef MULTIPLE_HEAPS
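// Balances SOH allocation contexts across server GC heaps. The first allocation just
// binds the context to a home heap; after that, every so often (or when the preferred
// heap changes), we look for the heap with the most remaining gen0 budget, weighted by
// how many contexts already allocate on it and biased by 'delta' towards staying put,
// searching NUMA-local heaps first and remote ones only if no local heap wins. When the
// context moves we also update the thread's ideal processor so it runs near the new heap.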
13233 void gc_heap::balance_heaps (alloc_context* acontext)
13234 {
13235
13236     if (acontext->alloc_count < 4)
13237     {
13238         if (acontext->alloc_count == 0)
13239         {
13240             acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, 0) ));
13241             gc_heap* hp = acontext->get_home_heap()->pGenGCHeap;
13242             dprintf (3, ("First allocation for context %Ix on heap %d\n", (size_t)acontext, (size_t)hp->heap_number));
13243             acontext->set_alloc_heap(acontext->get_home_heap());
13244             hp->alloc_context_count++;
13245         }
13246     }
13247     else
13248     {
13249         BOOL set_home_heap = FALSE;
13250         int hint = 0;
13251
13252         if (heap_select::can_find_heap_fast())
13253         {
13254             if (acontext->get_home_heap() != NULL)
13255                 hint = acontext->get_home_heap()->pGenGCHeap->heap_number;
13256             if (acontext->get_home_heap() != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
13257             {
13258                 set_home_heap = TRUE;
13259             }
13260         }
13261         else
13262         {
13263             // can't use gdt
13264             if ((acontext->alloc_count & 3) == 0)
13265                 set_home_heap = TRUE;
13266         }
13267
13268         if (set_home_heap)
13269         {
13270 /*
13271             // Since we are balancing up to MAX_SUPPORTED_CPUS, no need for this.
13272             if (n_heaps > MAX_SUPPORTED_CPUS)
13273             {
13274                 // on machines with many processors cache affinity is really king, so don't even try
13275                 // to balance on these.
13276                 acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, hint) );
13277                 acontext->alloc_heap = acontext->home_heap;
13278             }
13279             else
13280 */
13281             {
13282                 gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
13283
13284                 dynamic_data* dd = org_hp->dynamic_data_of (0);
13285                 ptrdiff_t org_size = dd_new_allocation (dd);
13286                 int org_alloc_context_count;
13287                 int max_alloc_context_count;
13288                 gc_heap* max_hp;
13289                 ptrdiff_t max_size;
13290                 size_t delta = dd_min_size (dd)/4;
13291
13292                 int start, end, finish;
13293                 heap_select::get_heap_range_for_heap(org_hp->heap_number, &start, &end);
13294                 finish = start + n_heaps;
13295
13296 try_again:
13297                 do
13298                 {
13299                     max_hp = org_hp;
13300                     max_size = org_size + delta;
13301                     acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, hint) ));
13302
13303                     if (org_hp == acontext->get_home_heap()->pGenGCHeap)
13304                         max_size = max_size + delta;
13305
13306                     org_alloc_context_count = org_hp->alloc_context_count;
13307                     max_alloc_context_count = org_alloc_context_count;
13308                     if (max_alloc_context_count > 1)
13309                         max_size /= max_alloc_context_count;
13310
13311                     for (int i = start; i < end; i++)
13312                     {
13313                         gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
13314                         dd = hp->dynamic_data_of (0);
13315                         ptrdiff_t size = dd_new_allocation (dd);
13316                         if (hp == acontext->get_home_heap()->pGenGCHeap)
13317                             size = size + delta;
13318                         int hp_alloc_context_count = hp->alloc_context_count;
13319                         if (hp_alloc_context_count > 0)
13320                             size /= (hp_alloc_context_count + 1);
13321                         if (size > max_size)
13322                         {
13323                             max_hp = hp;
13324                             max_size = size;
13325                             max_alloc_context_count = hp_alloc_context_count;
13326                         }
13327                     }
13328                 }
13329                 while (org_alloc_context_count != org_hp->alloc_context_count ||
13330                        max_alloc_context_count != max_hp->alloc_context_count);
13331
13332                 if ((max_hp == org_hp) && (end < finish))
13333                 {   
13334                     start = end; end = finish; 
13335                     delta = dd_min_size(dd)/2; // Make it twice as hard to balance to remote nodes on NUMA.
13336                     goto try_again;
13337                 }
13338
13339                 if (max_hp != org_hp)
13340                 {
13341                     org_hp->alloc_context_count--;
13342                     max_hp->alloc_context_count++;
13343                     acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
13344                     if (GCToOSInterface::CanEnableGCCPUGroups())
13345                     {   //only set ideal processor when max_hp and org_hp are in the same cpu
13346                         //group. DO NOT MOVE THREADS ACROSS CPU GROUPS
13347                         uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
13348                         uint16_t max_gn = heap_select::find_cpu_group_from_heap_no(max_hp->heap_number);
13349                         if (org_gn == max_gn) //only set within CPU group, so SetThreadIdealProcessor is enough
13350                         {   
13351                             uint16_t group_proc_no = heap_select::find_group_proc_from_heap_no(max_hp->heap_number);
13352
13353                             GCThreadAffinity affinity;
13354                             affinity.Processor = group_proc_no;
13355                             affinity.Group = org_gn;
13356                             if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
13357                             {
13358                                 dprintf (3, ("Failed to set the ideal processor and group for heap %d.",
13359                                             org_hp->heap_number));
13360                             }
13361                         }
13362                     }
13363                     else 
13364                     {
13365                         uint16_t proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
13366
13367                         GCThreadAffinity affinity;
13368                         affinity.Processor = proc_no;
13369                         affinity.Group = GCThreadAffinity::None;
13370
13371                         if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
13372                         {
13373                             dprintf (3, ("Failed to set the ideal processor for heap %d.",
13374                                         org_hp->heap_number));
13375                         }
13376                     }
13377                     dprintf (3, ("Switching context %p (home heap %d) ", 
13378                                  acontext,
13379                         acontext->get_home_heap()->pGenGCHeap->heap_number));
13380                     dprintf (3, (" from heap %d (%Id free bytes, %d contexts) ", 
13381                                  org_hp->heap_number,
13382                                  org_size,
13383                                  org_alloc_context_count));
13384                     dprintf (3, (" to heap %d (%Id free bytes, %d contexts)\n", 
13385                                  max_hp->heap_number,
13386                                  dd_new_allocation(max_hp->dynamic_data_of(0)),
13387                                                    max_alloc_context_count));
13388                 }
13389             }
13390         }
13391     }
13392     acontext->alloc_count++;
13393 }
13394
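// LOH counterpart of balance_heaps: returns the heap with the most remaining LOH budget
// (dd_new_allocation of max_generation + 1), preferring the current heap by 'delta' and
// only considering remote heaps if none of the NUMA-local ones beats it.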
13395 gc_heap* gc_heap::balance_heaps_loh (alloc_context* acontext, size_t /*size*/)
13396 {
13397     gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
13398     //dprintf (1, ("LA: %Id", size));
13399
13400     //if (size > 128*1024)
13401     if (1)
13402     {
13403         dynamic_data* dd = org_hp->dynamic_data_of (max_generation + 1);
13404
13405         ptrdiff_t org_size = dd_new_allocation (dd);
13406         gc_heap* max_hp;
13407         ptrdiff_t max_size;
13408         size_t delta = dd_min_size (dd) * 4;
13409
13410         int start, end, finish;
13411         heap_select::get_heap_range_for_heap(org_hp->heap_number, &start, &end);
13412         finish = start + n_heaps;
13413
13414 try_again:
13415         {
13416             max_hp = org_hp;
13417             max_size = org_size + delta;
13418             dprintf (3, ("orig hp: %d, max size: %d",
13419                 org_hp->heap_number,
13420                 max_size));
13421
13422             for (int i = start; i < end; i++)
13423             {
13424                 gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
13425                 dd = hp->dynamic_data_of (max_generation + 1);
13426                 ptrdiff_t size = dd_new_allocation (dd);
13427                 dprintf (3, ("hp: %d, size: %d",
13428                     hp->heap_number,
13429                     size));
13430                 if (size > max_size)
13431                 {
13432                     max_hp = hp;
13433                     max_size = size;
13434                     dprintf (3, ("max hp: %d, max size: %d",
13435                         max_hp->heap_number,
13436                         max_size));
13437                 }
13438             }
13439         }
13440
13441         if ((max_hp == org_hp) && (end < finish))
13442         {
13443             start = end; end = finish;
13444             delta = dd_min_size(dd) * 4;   // Need to tune delta
13445             goto try_again;
13446         }
13447
13448         if (max_hp != org_hp)
13449         {
13450             dprintf (3, ("loh: %d(%Id)->%d(%Id)", 
13451                 org_hp->heap_number, dd_new_allocation (org_hp->dynamic_data_of (max_generation + 1)),
13452                 max_hp->heap_number, dd_new_allocation (max_hp->dynamic_data_of (max_generation + 1))));
13453         }
13454
13455         return max_hp;
13456     }
13457     else
13458     {
13459         return org_hp;
13460     }
13461 }
13462 #endif //MULTIPLE_HEAPS
13463
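// Entry point used when the fast allocation path runs out of space: pick a heap (under
// MULTIPLE_HEAPS) and keep calling try_allocate_more_space until it stops returning -1,
// i.e. until we either get more space or definitively fail.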
13464 BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
13465                                   int alloc_generation_number)
13466 {
13467     int status;
13468     do
13469     { 
13470 #ifdef MULTIPLE_HEAPS
13471         if (alloc_generation_number == 0)
13472         {
13473             balance_heaps (acontext);
13474             status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
13475         }
13476         else
13477         {
13478             gc_heap* alloc_heap = balance_heaps_loh (acontext, size);
13479             status = alloc_heap->try_allocate_more_space (acontext, size, alloc_generation_number);
13480         }
13481 #else
13482         status = try_allocate_more_space (acontext, size, alloc_generation_number);
13483 #endif //MULTIPLE_HEAPS
13484     }
13485     while (status == -1);
13486     
13487     return (status != 0);
13488 }
13489
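// Fast path for SOH allocation: bump the allocation context's pointer; if the context
// is exhausted, roll the pointer back and call allocate_more_space to refill it, then
// retry.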
13490 inline
13491 CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
13492 {
13493     size_t size = Align (jsize);
13494     assert (size >= Align (min_obj_size));
13495     {
13496     retry:
13497         uint8_t*  result = acontext->alloc_ptr;
13498         acontext->alloc_ptr+=size;
13499         if (acontext->alloc_ptr <= acontext->alloc_limit)
13500         {
13501             CObjectHeader* obj = (CObjectHeader*)result;
13502             assert (obj != 0);
13503             return obj;
13504         }
13505         else
13506         {
13507             acontext->alloc_ptr -= size;
13508
13509 #ifdef _MSC_VER
13510 #pragma inline_depth(0)
13511 #endif //_MSC_VER
13512
13513             if (! allocate_more_space (acontext, size, 0))
13514                 return 0;
13515
13516 #ifdef _MSC_VER
13517 #pragma inline_depth(20)
13518 #endif //_MSC_VER
13519
13520             goto retry;
13521         }
13522     }
13523 }
13524
13525 inline
13526 CObjectHeader* gc_heap::try_fast_alloc (size_t jsize)
13527 {
13528     size_t size = Align (jsize);
13529     assert (size >= Align (min_obj_size));
13530     generation* gen = generation_of (0);
13531     uint8_t*  result = generation_allocation_pointer (gen);
13532     generation_allocation_pointer (gen) += size;
13533     if (generation_allocation_pointer (gen) <=
13534         generation_allocation_limit (gen))
13535     {
13536         return (CObjectHeader*)result;
13537     }
13538     else
13539     {
13540         generation_allocation_pointer (gen) -= size;
13541         return 0;
13542     }
13543 }
13544 void  gc_heap::leave_allocation_segment (generation* gen)
13545 {
13546     adjust_limit (0, 0, gen, max_generation);
13547 }
13548
13549 void gc_heap::init_free_and_plug()
13550 {
13551 #ifdef FREE_USAGE_STATS
13552     for (int i = 0; i <= settings.condemned_generation; i++)
13553     {
13554         generation* gen = generation_of (i);
13555         memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces));
13556         memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
13557         memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces));
13558     }
13559
13560     if (settings.condemned_generation != max_generation)
13561     {
13562         for (int i = (settings.condemned_generation + 1); i <= max_generation; i++)
13563         {
13564             generation* gen = generation_of (i);
13565             memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
13566         }
13567     }
13568 #endif //FREE_USAGE_STATS
13569 }
13570
13571 void gc_heap::print_free_and_plug (const char* msg)
13572 {
13573 #if defined(FREE_USAGE_STATS) && defined(SIMPLE_DPRINTF)
13574     int older_gen = ((settings.condemned_generation == max_generation) ? max_generation : (settings.condemned_generation + 1));
13575     for (int i = 0; i <= older_gen; i++)
13576     {
13577         generation* gen = generation_of (i);
13578         for (int j = 0; j < NUM_GEN_POWER2; j++)
13579         {
13580             if ((gen->gen_free_spaces[j] != 0) || (gen->gen_plugs[j] != 0))
13581             {
13582                 dprintf (2, ("[%s][h%d][%s#%d]gen%d: 2^%d: F: %Id, P: %Id", 
13583                     msg, 
13584                     heap_number, 
13585                     (settings.concurrent ? "BGC" : "GC"),
13586                     settings.gc_index,
13587                     i,
13588                     (j + 9), gen->gen_free_spaces[j], gen->gen_plugs[j]));
13589             }
13590         }
13591     }
13592 #else
13593     UNREFERENCED_PARAMETER(msg);
13594 #endif //FREE_USAGE_STATS && SIMPLE_DPRINTF
13595 }
13596
13597 void gc_heap::add_gen_plug (int gen_number, size_t plug_size)
13598 {
13599 #ifdef FREE_USAGE_STATS
13600     dprintf (3, ("adding plug size %Id to gen%d", plug_size, gen_number));
13601     generation* gen = generation_of (gen_number);
13602     size_t sz = BASE_GEN_SIZE;
13603     int i = 0;
13604
13605     for (; i < NUM_GEN_POWER2; i++)
13606     {
13607         if (plug_size < sz)
13608         {
13609             break;
13610         }
13611         sz = sz * 2;
13612     }
13613     
13614     (gen->gen_plugs[i])++;
13615 #else
13616     UNREFERENCED_PARAMETER(gen_number);
13617     UNREFERENCED_PARAMETER(plug_size);
13618 #endif //FREE_USAGE_STATS
13619 }
13620
13621 void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size)
13622 {
13623 #ifdef FREE_USAGE_STATS
13624     generation* gen = generation_of (gen_number);
13625     size_t sz = BASE_GEN_SIZE;
13626     int i = 0;
13627
13628     for (; i < NUM_GEN_POWER2; i++)
13629     {
13630         if (free_size < sz)
13631         {
13632             break;
13633         }
13634         sz = sz * 2;
13635     }
13636     
13637     (gen->gen_current_pinned_free_spaces[i])++;
13638     generation_pinned_free_obj_space (gen) += free_size;
13639     dprintf (3, ("left pin free %Id(2^%d) to gen%d, total %Id bytes (%Id)", 
13640         free_size, (i + 10), gen_number, 
13641         generation_pinned_free_obj_space (gen),
13642         gen->gen_current_pinned_free_spaces[i]));
13643 #else
13644     UNREFERENCED_PARAMETER(gen_number);
13645     UNREFERENCED_PARAMETER(free_size);
13646 #endif //FREE_USAGE_STATS
13647 }
13648
13649 void gc_heap::add_gen_free (int gen_number, size_t free_size)
13650 {
13651 #ifdef FREE_USAGE_STATS
13652     dprintf (3, ("adding free size %Id to gen%d", free_size, gen_number));
13653     generation* gen = generation_of (gen_number);
13654     size_t sz = BASE_GEN_SIZE;
13655     int i = 0;
13656
13657     for (; i < NUM_GEN_POWER2; i++)
13658     {
13659         if (free_size < sz)
13660         {
13661             break;
13662         }
13663         sz = sz * 2;
13664     }
13665     
13666     (gen->gen_free_spaces[i])++;
13667 #else
13668     UNREFERENCED_PARAMETER(gen_number);
13669     UNREFERENCED_PARAMETER(free_size);
13670 #endif //FREE_USAGE_STATS
13671 }
13672
13673 void gc_heap::remove_gen_free (int gen_number, size_t free_size)
13674 {
13675 #ifdef FREE_USAGE_STATS
13676     dprintf (3, ("removing free %Id from gen%d", free_size, gen_number));
13677     generation* gen = generation_of (gen_number);
13678     size_t sz = BASE_GEN_SIZE;
13679     int i = 0;
13680
13681     for (; i < NUM_GEN_POWER2; i++)
13682     {
13683         if (free_size < sz)
13684         {
13685             break;
13686         }
13687         sz = sz * 2;
13688     }
13689     
13690     (gen->gen_free_spaces[i])--;
13691 #else
13692     UNREFERENCED_PARAMETER(gen_number);
13693     UNREFERENCED_PARAMETER(free_size);
13694 #endif //FREE_USAGE_STATS
13695 }
13696
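// Used during the plan phase to find space in an older generation for a plug of 'size'
// bytes being promoted from from_gen_number. Tries the current allocation region first,
// then the generation's free list buckets, then whatever is left (committed or reserved)
// at the end of its non-ephemeral segments; returns 0 if nothing fits. Extra padding may
// be inserted in front of the plug for short plugs or to preserve the alignment old_loc had.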
13697 uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
13698                                              int from_gen_number,
13699                                              uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL)
13700 {
13701     size = Align (size);
13702     assert (size >= Align (min_obj_size));
13703     assert (from_gen_number < max_generation);
13704     assert (from_gen_number >= 0);
13705     assert (generation_of (from_gen_number + 1) == gen);
13706
13707     allocator* gen_allocator = generation_allocator (gen);
13708     BOOL discard_p = gen_allocator->discard_if_no_fit_p ();
13709     int pad_in_front = (old_loc != 0)? USE_PADDING_FRONT : 0;
13710
13711     size_t real_size = size + Align (min_obj_size);
13712     if (pad_in_front)
13713         real_size += Align (min_obj_size);
13714
13715     if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
13716                        generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)))
13717     {
13718         size_t sz_list = gen_allocator->first_bucket_size();
13719         for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
13720         {
13721             if ((real_size < (sz_list / 2)) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
13722             {
13723                 uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
13724                 uint8_t* prev_free_item = 0;
13725                 while (free_list != 0)
13726                 {
13727                     dprintf (3, ("considering free list %Ix", (size_t)free_list));
13728
13729                     size_t free_list_size = unused_array_size (free_list);
13730
13731                     if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size),
13732                                     old_loc, USE_PADDING_TAIL | pad_in_front))
13733                     {
13734                         dprintf (4, ("F:%Ix-%Id",
13735                                      (size_t)free_list, free_list_size));
13736
13737                         gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, !discard_p);
13738                         generation_free_list_space (gen) -= free_list_size;
13739                         remove_gen_free (gen->gen_num, free_list_size);
13740
13741                         adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
13742                         goto finished;
13743                     }
13744                     // We do first fit on bucket 0 because we are not guaranteed to find a fit there.
13745                     else if (discard_p || (a_l_idx == 0))
13746                     {
13747                         dprintf (3, ("couldn't use this free area, discarding"));
13748                         generation_free_obj_space (gen) += free_list_size;
13749
13750                         gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
13751                         generation_free_list_space (gen) -= free_list_size;
13752                         remove_gen_free (gen->gen_num, free_list_size);
13753                     }
13754                     else
13755                     {
13756                         prev_free_item = free_list;
13757                     }
13758                     free_list = free_list_slot (free_list); 
13759                 }
13760             }
13761             sz_list = sz_list * 2;
13762         }
13763         //go back to the beginning of the segment list 
13764         generation_allocate_end_seg_p (gen) = TRUE;
13765         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
13766         if (seg != generation_allocation_segment (gen))
13767         {
13768             leave_allocation_segment (gen);
13769             generation_allocation_segment (gen) = seg;
13770         }
13771         while (seg != ephemeral_heap_segment)
13772         {
13773             if (size_fit_p(size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
13774                            heap_segment_committed (seg), old_loc, USE_PADDING_TAIL | pad_in_front))
13775             {
13776                 dprintf (3, ("using what's left in committed"));
13777                 adjust_limit (heap_segment_plan_allocated (seg),
13778                               heap_segment_committed (seg) -
13779                               heap_segment_plan_allocated (seg),
13780                               gen, from_gen_number+1);
13781                 // dformat (t, 3, "Expanding segment allocation");
13782                 heap_segment_plan_allocated (seg) =
13783                     heap_segment_committed (seg);
13784                 goto finished;
13785             }
13786             else
13787             {
13788                 if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
13789                                 heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
13790                     grow_heap_segment (seg, heap_segment_plan_allocated (seg), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG))
13791                 {
13792                     dprintf (3, ("using what's left in reserved"));
13793                     adjust_limit (heap_segment_plan_allocated (seg),
13794                                   heap_segment_committed (seg) -
13795                                   heap_segment_plan_allocated (seg),
13796                                   gen, from_gen_number+1);
13797                     heap_segment_plan_allocated (seg) =
13798                         heap_segment_committed (seg);
13799
13800                     goto finished;
13801                 }
13802                 else
13803                 {
13804                     leave_allocation_segment (gen);
13805                     heap_segment*   next_seg = heap_segment_next_rw (seg);
13806                     if (next_seg)
13807                     {
13808                         dprintf (3, ("getting next segment"));
13809                         generation_allocation_segment (gen) = next_seg;
13810                         generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
13811                         generation_allocation_limit (gen) = generation_allocation_pointer (gen);
13812                     }
13813                     else
13814                     {
13815                         size = 0;
13816                         goto finished;
13817                     }
13818                 }
13819             }
13820             seg = generation_allocation_segment (gen);
13821         }
13822         //No need to fix the last region. Will be done later
13823         size = 0;
13824         goto finished;
13825     }
13826     finished:
13827     if (0 == size)
13828     {
13829         return 0;
13830     }
13831     else
13832     {
13833         uint8_t*  result = generation_allocation_pointer (gen);
13834         size_t pad = 0;
13835
13836 #ifdef SHORT_PLUGS
13837         if ((pad_in_front & USE_PADDING_FRONT) &&
13838             (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
13839              ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
13840         {
13841             pad = Align (min_obj_size);
13842             set_plug_padded (old_loc);
13843         }
13844 #endif //SHORT_PLUGS
13845
13846 #ifdef FEATURE_STRUCTALIGN
13847         _ASSERTE(!old_loc || alignmentOffset != 0);
13848         _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
13849         if (old_loc != 0)
13850         {
13851             size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
13852             set_node_aligninfo (old_loc, requiredAlignment, pad1);
13853             pad += pad1;
13854         }
13855 #else // FEATURE_STRUCTALIGN
13856         if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
13857         {
13858             pad += switch_alignment_size (is_plug_padded (old_loc));
13859             set_node_realigned (old_loc);
13860             dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
13861                          (size_t)old_loc, (size_t)(result+pad)));
13862             assert (same_large_alignment_p (result + pad, old_loc));
13863         }
13864 #endif // FEATURE_STRUCTALIGN
13865         dprintf (3, ("Allocate %Id bytes", size));
13866
13867         if ((old_loc == 0) || (pad != 0))
13868         {
13869             //allocating a non-plug or a gap, so reset the start region
13870             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
13871         }
13872
13873         generation_allocation_pointer (gen) += size + pad;
13874         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
13875         if (generation_allocate_end_seg_p (gen))
13876         {
13877             generation_end_seg_allocated (gen) += size;
13878         }
13879         else
13880         {
13881             generation_free_list_allocated (gen) += size;
13882         }
13883         generation_allocation_size (gen) += size;
13884
13885         dprintf (3, ("aio: ptr: %Ix, limit: %Ix, sr: %Ix", 
13886             generation_allocation_pointer (gen), generation_allocation_limit (gen),
13887             generation_allocation_context_start_region (gen)));
13888
13889         return result + pad;
13890     }
13891 }
13892
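      // Gives every generation below max_generation a planned allocation start, then accounts
      // for the unused tail of the consing generation's allocation region: either pull the
      // segment's plan_allocated back to the allocation pointer, or record the gap as the
      // pinned_len of the pinned plug that bounds the region.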
13893 void gc_heap::repair_allocation_in_expanded_heap (generation* consing_gen)
13894 {
13895     //make sure that every generation has a planned allocation start
13896     int  gen_number = max_generation - 1;
13897     while (gen_number >= 0)
13898     {
13899         generation* gen = generation_of (gen_number);
13900         if (0 == generation_plan_allocation_start (gen))
13901         {
13902             realloc_plan_generation_start (gen, consing_gen);
13903
13904             assert (generation_plan_allocation_start (gen));
13905         }
13906         gen_number--;
13907     }
13908
13909     // now we know the planned allocation size
13910     size_t  size = (generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
13911     heap_segment* seg = generation_allocation_segment (consing_gen);
13912     if (generation_allocation_limit (consing_gen) == heap_segment_plan_allocated (seg))
13913     {
13914         if (size != 0)
13915         {
13916             heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
13917         }
13918     }
13919     else
13920     {
13921         assert (settings.condemned_generation == max_generation);
13922         uint8_t* first_address = generation_allocation_limit (consing_gen);
13923         //look through the pinned plugs for relevant ones.
13924         //Look for the right pinned plug to start from.
13925         size_t mi = 0;
13926         mark* m = 0;
13927         while (mi != mark_stack_tos)
13928         {
13929             m = pinned_plug_of (mi);
13930             if ((pinned_plug (m) == first_address))
13931                 break;
13932             else
13933                 mi++;
13934         }
13935         assert (mi != mark_stack_tos);
13936         pinned_len (m) = size;
13937     }
13938 }
13939
13940 //tododefrag optimize for new segment (plan_allocated == mem)
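      // Plan-allocates 'size' bytes for the plug at old_loc in a reused (expanded) segment:
      // tries the current allocation region, then free space in front of pinned plugs on the
      // segment, then the space between plan_allocated and committed at the end of the segment.
      // adjacentp reports whether the allocation stayed in the current allocation region.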
13941 uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen,
13942                                           size_t size,
13943                                           BOOL& adjacentp,
13944                                           uint8_t* old_loc,
13945 #ifdef SHORT_PLUGS
13946                                           BOOL set_padding_on_saved_p,
13947                                           mark* pinned_plug_entry,
13948 #endif //SHORT_PLUGS
13949                                           BOOL consider_bestfit,
13950                                           int active_new_gen_number
13951                                           REQD_ALIGN_AND_OFFSET_DCL)
13952 {
13953     UNREFERENCED_PARAMETER(active_new_gen_number);
13954     dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size));
13955
13956     size = Align (size);
13957     assert (size >= Align (min_obj_size));
13958     int pad_in_front = (old_loc != 0) ? USE_PADDING_FRONT : 0;
13959
13960     if (consider_bestfit && use_bestfit)
13961     {
13962         assert (bestfit_seg);
13963         dprintf (SEG_REUSE_LOG_1, ("reallocating 0x%Ix in expanded heap, size: %Id", 
13964                     old_loc, size));
13965         return bestfit_seg->fit (old_loc, 
13966 #ifdef SHORT_PLUGS
13967                                  set_padding_on_saved_p,
13968                                  pinned_plug_entry,
13969 #endif //SHORT_PLUGS
13970                                  size REQD_ALIGN_AND_OFFSET_ARG);
13971     }
13972
13973     heap_segment* seg = generation_allocation_segment (gen);
13974
13975     if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
13976                        generation_allocation_limit (gen), old_loc,
13977                        ((generation_allocation_limit (gen) !=
13978                           heap_segment_plan_allocated (seg))? USE_PADDING_TAIL : 0) | pad_in_front)))
13979     {
13980         dprintf (3, ("aie: can't fit: ptr: %Ix, limit: %Ix", generation_allocation_pointer (gen),
13981             generation_allocation_limit (gen)));
13982
13983         adjacentp = FALSE;
13984         uint8_t* first_address = (generation_allocation_limit (gen) ?
13985                                generation_allocation_limit (gen) :
13986                                heap_segment_mem (seg));
13987         assert (in_range_for_segment (first_address, seg));
13988
13989         uint8_t* end_address   = heap_segment_reserved (seg);
13990
13991         dprintf (3, ("aie: first_addr: %Ix, gen alloc limit: %Ix, end_address: %Ix",
13992             first_address, generation_allocation_limit (gen), end_address));
13993
13994         size_t mi = 0;
13995         mark* m = 0;
13996
13997         if (heap_segment_allocated (seg) != heap_segment_mem (seg))
13998         {
13999             assert (settings.condemned_generation == max_generation);
14000             //look through the pinned plugs for relevant ones.
14001             //Look for the right pinned plug to start from.
14002             while (mi != mark_stack_tos)
14003             {
14004                 m = pinned_plug_of (mi);
14005                 if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))
14006                 {
14007                     dprintf (3, ("aie: found pin: %Ix", pinned_plug (m)));
14008                     break;
14009                 }
14010                 else
14011                     mi++;
14012             }
14013             if (mi != mark_stack_tos)
14014             {
14015                 //fix old free list.
14016                 size_t  hsize = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
14017                 {
14018                     dprintf(3,("gc filling up hole"));
14019                     ptrdiff_t mi1 = (ptrdiff_t)mi;
14020                     while ((mi1 >= 0) &&
14021                            (pinned_plug (pinned_plug_of(mi1)) != generation_allocation_limit (gen)))
14022                     {
14023                         dprintf (3, ("aie: checking pin %Ix", pinned_plug (pinned_plug_of(mi1))));
14024                         mi1--;
14025                     }
14026                     if (mi1 >= 0)
14027                     {
14028                         size_t saved_pinned_len = pinned_len (pinned_plug_of(mi1));
14029                         pinned_len (pinned_plug_of(mi1)) = hsize;
14030                         dprintf (3, ("changing %Ix len %Ix->%Ix", 
14031                             pinned_plug (pinned_plug_of(mi1)), 
14032                             saved_pinned_len, pinned_len (pinned_plug_of(mi1))));
14033                     }
14034                 }
14035             }
14036         }
14037         else
14038         {
14039             assert (generation_allocation_limit (gen) ==
14040                     generation_allocation_pointer (gen));
14041             mi = mark_stack_tos;
14042         }
14043
14044         while ((mi != mark_stack_tos) && in_range_for_segment (pinned_plug (m), seg))
14045         {
14046             size_t len = pinned_len (m);
14047             uint8_t*  free_list = (pinned_plug (m) - len);
14048             dprintf (3, ("aie: testing free item: %Ix->%Ix(%Ix)", 
14049                 free_list, (free_list + len), len));
14050             if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + len), old_loc, USE_PADDING_TAIL | pad_in_front))
14051             {
14052                 dprintf (3, ("aie: Found adequate unused area: %Ix, size: %Id",
14053                             (size_t)free_list, len));
14054                 {
14055                     generation_allocation_pointer (gen) = free_list;
14056                     generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14057                     generation_allocation_limit (gen) = (free_list + len);
14058                 }
14059                 goto allocate_in_free;
14060             }
14061             mi++;
14062             m = pinned_plug_of (mi);
14063         }
14064
14065         //switch to the end of the segment.
14066         generation_allocation_pointer (gen) = heap_segment_plan_allocated (seg);
14067         generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14068         heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14069         generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14070         dprintf (3, ("aie: switching to end of seg: %Ix->%Ix(%Ix)", 
14071             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14072             (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
14073
14074         if (!size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14075                          generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front))
14076         {
14077             dprintf (3, ("aie: ptr: %Ix, limit: %Ix, can't alloc", generation_allocation_pointer (gen),
14078                 generation_allocation_limit (gen)));
14079             assert (!"Can't allocate if no free space");
14080             return 0;
14081         }
14082     }
14083     else
14084     {
14085         adjacentp = TRUE;
14086     }
14087
14088 allocate_in_free:
14089     {
14090         uint8_t*  result = generation_allocation_pointer (gen);
14091         size_t pad = 0;
14092
14093 #ifdef SHORT_PLUGS
14094         if ((pad_in_front & USE_PADDING_FRONT) &&
14095             (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
14096              ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14097
14098         {
14099             pad = Align (min_obj_size);
14100             set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
14101         }
14102 #endif //SHORT_PLUGS
14103
14104 #ifdef FEATURE_STRUCTALIGN
14105         _ASSERTE(!old_loc || alignmentOffset != 0);
14106         _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14107         if (old_loc != 0)
14108         {
14109             size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14110             set_node_aligninfo (old_loc, requiredAlignment, pad1);
14111             pad += pad1;
14112             adjacentp = FALSE;
14113         }
14114 #else // FEATURE_STRUCTALIGN
14115         if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14116         {
14117             pad += switch_alignment_size (is_plug_padded (old_loc));
14118             set_node_realigned (old_loc);
14119             dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14120                          (size_t)old_loc, (size_t)(result+pad)));
14121             assert (same_large_alignment_p (result + pad, old_loc));
14122             adjacentp = FALSE;
14123         }
14124 #endif // FEATURE_STRUCTALIGN
14125
14126         if ((old_loc == 0) || (pad != 0))
14127         {
14128             //allocating a non-plug or a gap, so reset the start region
14129             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14130         }
14131
14132         generation_allocation_pointer (gen) += size + pad;
14133         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14134         dprintf (3, ("Allocated in expanded heap %Ix:%Id", (size_t)(result+pad), size));
14135
14136         dprintf (3, ("aie: ptr: %Ix, limit: %Ix, sr: %Ix", 
14137             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14138             generation_allocation_context_start_region (gen)));
14139
14140         return result + pad;
14141     }
14142 }
14143
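      // If the consing generation is not allocating in the ephemeral segment, seal the current
      // segment's plan_allocated at the allocation pointer and return (max_generation - 1) as the
      // new consing generation, with its allocation context at the start of the ephemeral segment.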
14144 generation*  gc_heap::ensure_ephemeral_heap_segment (generation* consing_gen)
14145 {
14146     heap_segment* seg = generation_allocation_segment (consing_gen);
14147     if (seg != ephemeral_heap_segment)
14148     {
14149         assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (seg));
14150         assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (seg));
14151
14152         //fix the allocated size of the segment.
14153         heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
14154
14155         generation* new_consing_gen = generation_of (max_generation - 1);
14156         generation_allocation_pointer (new_consing_gen) =
14157                 heap_segment_mem (ephemeral_heap_segment);
14158         generation_allocation_limit (new_consing_gen) =
14159             generation_allocation_pointer (new_consing_gen);
14160         generation_allocation_context_start_region (new_consing_gen) = 
14161             generation_allocation_pointer (new_consing_gen);
14162         generation_allocation_segment (new_consing_gen) = ephemeral_heap_segment;
14163
14164         return new_consing_gen;
14165     }
14166     else
14167         return consing_gen;
14168 }
14169
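      // Plan-phase allocation in the condemned generations: allocates 'size' bytes for the plug
      // at old_loc, dequeuing pinned plugs that bound the current allocation region and growing or
      // advancing segments as needed. Returns 0 when the plug should be converted to a pinned plug
      // (SHORT_PLUGS) or when no segment is left (the generation 0 gap case).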
14170 uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen,
14171                                                   size_t size,
14172                                                   int from_gen_number,
14173 #ifdef SHORT_PLUGS
14174                                                   BOOL* convert_to_pinned_p,
14175                                                   uint8_t* next_pinned_plug,
14176                                                   heap_segment* current_seg,
14177 #endif //SHORT_PLUGS
14178                                                   uint8_t* old_loc
14179                                                   REQD_ALIGN_AND_OFFSET_DCL)
14180 {
14181     // Make sure that the youngest generation gap hasn't been allocated
14182     if (settings.promotion)
14183     {
14184         assert (generation_plan_allocation_start (youngest_generation) == 0);
14185     }
14186
14187     size = Align (size);
14188     assert (size >= Align (min_obj_size));
14189     int to_gen_number = from_gen_number;
14190     if (from_gen_number != (int)max_generation)
14191     {
14192         to_gen_number = from_gen_number + (settings.promotion ? 1 : 0);
14193     }
14194
14195     dprintf (3, ("aic gen%d: s: %Id, %d->%d, %Ix->%Ix", gen->gen_num, size, from_gen_number, 
14196           to_gen_number, generation_allocation_pointer(gen), generation_allocation_limit(gen)));
14197
14198     int pad_in_front = (old_loc != 0) ? USE_PADDING_FRONT : 0;
14199
14200     if ((from_gen_number != -1) && (from_gen_number != (int)max_generation) && settings.promotion)
14201     {
14202         generation_condemned_allocated (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
14203         generation_allocation_size (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
14204     }
14205 retry:
14206     {
14207         heap_segment* seg = generation_allocation_segment (gen);
14208         if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14209                            generation_allocation_limit (gen), old_loc,
14210                            ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))?USE_PADDING_TAIL:0)|pad_in_front)))
14211         {
14212             if ((! (pinned_plug_que_empty_p()) &&
14213                  (generation_allocation_limit (gen) ==
14214                   pinned_plug (oldest_pin()))))
14215             {
14216                 size_t entry = deque_pinned_plug();
14217                 mark* pinned_plug_entry = pinned_plug_of (entry);
14218                 size_t len = pinned_len (pinned_plug_entry);
14219                 uint8_t* plug = pinned_plug (pinned_plug_entry);
14220                 set_new_pin_info (pinned_plug_entry, generation_allocation_pointer (gen));
14221
14222 #ifdef FREE_USAGE_STATS
14223                 generation_allocated_in_pinned_free (gen) += generation_allocated_since_last_pin (gen);
14224                 dprintf (3, ("allocated %Id so far within pin %Ix, total->%Id", 
14225                     generation_allocated_since_last_pin (gen), 
14226                     plug,
14227                     generation_allocated_in_pinned_free (gen)));
14228                 generation_allocated_since_last_pin (gen) = 0;
14229
14230                 add_item_to_current_pinned_free (gen->gen_num, pinned_len (pinned_plug_of (entry)));
14231 #endif //FREE_USAGE_STATS
14232
14233                 dprintf (3, ("mark stack bos: %Id, tos: %Id, aic: p %Ix len: %Ix->%Ix", 
14234                     mark_stack_bos, mark_stack_tos, plug, len, pinned_len (pinned_plug_of (entry))));
14235
14236                 assert(mark_stack_array[entry].len == 0 ||
14237                        mark_stack_array[entry].len >= Align(min_obj_size));
14238                 generation_allocation_pointer (gen) = plug + len;
14239                 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14240                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14241                 set_allocator_next_pin (gen);
14242
14243                 //Add the size of the pinned plug to the right pinned allocations
14244                 //find out which gen this pinned plug came from 
14245                 int frgn = object_gennum (plug);
14246                 if ((frgn != (int)max_generation) && settings.promotion)
14247                 {
14248                     generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
14249                     int togn = object_gennum_plan (plug);
14250                     if (frgn < togn)
14251                     {
14252                         generation_pinned_allocation_compact_size (generation_of (togn)) += len;
14253                     }
14254                 }
14255                 goto retry;
14256             }
14257             
14258             if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
14259             {
14260                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14261                 dprintf (3, ("changed limit to plan alloc: %Ix", generation_allocation_limit (gen)));
14262             }
14263             else
14264             {
14265                 if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
14266                 {
14267                     heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14268                     generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14269                     dprintf (3, ("changed limit to commit: %Ix", generation_allocation_limit (gen)));
14270                 }
14271                 else
14272                 {
14273 #ifndef RESPECT_LARGE_ALIGNMENT
14274                     assert (gen != youngest_generation);
14275 #endif //RESPECT_LARGE_ALIGNMENT
14276
14277                     if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14278                                     heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
14279                         (grow_heap_segment (seg, generation_allocation_pointer (gen), old_loc,
14280                                             size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG)))
14281                     {
14282                         dprintf (3, ("Expanded segment allocation by committing more memory"));
14283                         heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14284                         generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14285                     }
14286                     else
14287                     {
14288                         heap_segment*   next_seg = heap_segment_next (seg);
14289                         assert (generation_allocation_pointer (gen)>=
14290                                 heap_segment_mem (seg));
14291                         // Verify that all pinned plugs for this segment are consumed
14292                         if (!pinned_plug_que_empty_p() &&
14293                             ((pinned_plug (oldest_pin()) <
14294                               heap_segment_allocated (seg)) &&
14295                              (pinned_plug (oldest_pin()) >=
14296                               generation_allocation_pointer (gen))))
14297                         {
14298                             LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
14299                                          pinned_plug (oldest_pin())));
14300                             FATAL_GC_ERROR();
14301                         }
14302                         assert (generation_allocation_pointer (gen)>=
14303                                 heap_segment_mem (seg));
14304                         assert (generation_allocation_pointer (gen)<=
14305                                 heap_segment_committed (seg));
14306                         heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);
14307
14308                         if (next_seg)
14309                         {
14310                             generation_allocation_segment (gen) = next_seg;
14311                             generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
14312                             generation_allocation_limit (gen) = generation_allocation_pointer (gen);
14313                             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14314                         }
14315                         else
14316                         {
14317                             return 0; //should only happen during allocation of generation 0 gap
14318                             // in that case we are going to grow the heap anyway
14319                         }
14320                     }
14321                 }
14322             }
14323             set_allocator_next_pin (gen);
14324
14325             goto retry;
14326         }
14327     }
14328
14329     {
14330         assert (generation_allocation_pointer (gen)>=
14331                 heap_segment_mem (generation_allocation_segment (gen)));
14332         uint8_t* result = generation_allocation_pointer (gen);
14333         size_t pad = 0;
14334 #ifdef SHORT_PLUGS
14335         if ((pad_in_front & USE_PADDING_FRONT) &&
14336             (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
14337              ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14338         {
14339             ptrdiff_t dist = old_loc - result;
14340             if (dist == 0)
14341             {
14342                 dprintf (3, ("old alloc: %Ix, same as new alloc, not padding", old_loc));
14343                 pad = 0;
14344             }
14345             else
14346             {
14347                 if ((dist > 0) && (dist < (ptrdiff_t)Align (min_obj_size)))
14348                 {
14349                     dprintf (3, ("old alloc: %Ix, only %d bytes > new alloc! Shouldn't happen", old_loc, dist));
14350                     FATAL_GC_ERROR();
14351                 }
14352
14353                 pad = Align (min_obj_size);
14354                 set_plug_padded (old_loc);
14355             }
14356         }
14357 #endif //SHORT_PLUGS
14358 #ifdef FEATURE_STRUCTALIGN
14359         _ASSERTE(!old_loc || alignmentOffset != 0);
14360         _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14361         if ((old_loc != 0))
14362         {
14363             size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14364             set_node_aligninfo (old_loc, requiredAlignment, pad1);
14365             pad += pad1;
14366         }
14367 #else // FEATURE_STRUCTALIGN
14368         if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14369         {
14370             pad += switch_alignment_size (is_plug_padded (old_loc));
14371             set_node_realigned(old_loc);
14372             dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14373                          (size_t)old_loc, (size_t)(result+pad)));
14374             assert (same_large_alignment_p (result + pad, old_loc));
14375         }
14376 #endif // FEATURE_STRUCTALIGN
14377
14378 #ifdef SHORT_PLUGS
14379         if ((next_pinned_plug != 0) && (pad != 0) && (generation_allocation_segment (gen) == current_seg))
14380         {
14381             assert (old_loc != 0);
14382             ptrdiff_t dist_to_next_pin = (ptrdiff_t)(next_pinned_plug - (generation_allocation_pointer (gen) + size + pad));
14383             assert (dist_to_next_pin >= 0);
14384
14385             if ((dist_to_next_pin >= 0) && (dist_to_next_pin < (ptrdiff_t)Align (min_obj_size)))
14386             {
14387                 dprintf (3, ("%Ix->(%Ix,%Ix),%Ix(%Ix)(%Ix),NP->PP", 
14388                     old_loc, 
14389                     generation_allocation_pointer (gen),
14390                     generation_allocation_limit (gen),
14391                     next_pinned_plug,
14392                     size, 
14393                     dist_to_next_pin));
14394                 clear_plug_padded (old_loc);
14395                 pad = 0;
14396                 *convert_to_pinned_p = TRUE;
14397                 record_interesting_data_point (idp_converted_pin);
14398
14399                 return 0;
14400             }
14401         }
14402 #endif //SHORT_PLUGS
14403
14404         if ((old_loc == 0) || (pad != 0))
14405         {
14406             //allocating a non-plug or a gap, so reset the start region
14407             generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14408         }
14409
14410         generation_allocation_pointer (gen) += size + pad;
14411         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14412
14413 #ifdef FREE_USAGE_STATS
14414         generation_allocated_since_last_pin (gen) += size;
14415 #endif //FREE_USAGE_STATS
14416
14417         dprintf (3, ("aic: ptr: %Ix, limit: %Ix, sr: %Ix", 
14418             generation_allocation_pointer (gen), generation_allocation_limit (gen),
14419             generation_allocation_context_start_region (gen)));
14420
14421         assert (result + pad);
14422         return result + pad;
14423     }
14424 }
14425
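      // Integer exponentiation by repeated multiplication; assumes y >= 0 and does no overflow checking.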
14426 inline int power (int x, int y)
14427 {
14428     int z = 1;
14429     for (int i = 0; i < y; i++)
14430     {
14431         z = z*x;
14432     }
14433     return z;
14434 }
14435
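      // Joined decision on the generation to condemn: requests a blocking collection if any heap
      // saw last_gc_before_oom, applies the elevation lock policy to full GCs, and applies GC
      // stress overrides; returns the final generation number.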
14436 int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation, 
14437                                            int n_initial,
14438                                            BOOL* blocking_collection_p
14439                                            STRESS_HEAP_ARG(int n_original))
14440 {
14441     int n = n_initial;
14442 #ifdef MULTIPLE_HEAPS
14443     BOOL blocking_p = *blocking_collection_p;
14444     if (!blocking_p)
14445     {
14446         for (int i = 0; i < n_heaps; i++)
14447         {
14448             if (g_heaps[i]->last_gc_before_oom)
14449             {
14450                 dprintf (GTC_LOG, ("h%d is setting blocking to TRUE", i));
14451                 *blocking_collection_p = TRUE;
14452                 break;
14453             }
14454         }
14455     }
14456 #endif //MULTIPLE_HEAPS
14457
14458     if (should_evaluate_elevation && (n == max_generation))
14459     {
14460         dprintf (GTC_LOG, ("lock: %d(%d)", 
14461             (settings.should_lock_elevation ? 1 : 0), 
14462             settings.elevation_locked_count));
14463
14464         if (settings.should_lock_elevation)
14465         {
14466             settings.elevation_locked_count++;
14467             if (settings.elevation_locked_count == 6)
14468             {
14469                 settings.elevation_locked_count = 0;
14470             }
14471             else
14472             {
14473                 n = max_generation - 1;
14474                 settings.elevation_reduced = TRUE;
14475             }
14476         }
14477         else
14478         {
14479             settings.elevation_locked_count = 0;
14480         }
14481     }
14482     else
14483     {
14484         settings.should_lock_elevation = FALSE;
14485         settings.elevation_locked_count = 0;
14486     }
14487
14488 #ifdef STRESS_HEAP
14489 #ifdef BACKGROUND_GC
14490     // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
14491     // generations to be collected,
14492     // generations to be collected.
14493     // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple
14494     // things that need to be fixed in this code block.
14495     if (n_original != max_generation &&
14496         g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
14497     {
14498 #ifndef FEATURE_REDHAWK
14499         // for the GC stress mix mode throttle down gen2 collections
14500         if (g_pConfig->IsGCStressMix())
14501         {
14502             size_t current_gc_count = 0;
14503
14504 #ifdef MULTIPLE_HEAPS
14505             current_gc_count = (size_t)dd_collection_count (g_heaps[0]->dynamic_data_of (0));
14506 #else
14507             current_gc_count = (size_t)dd_collection_count (dynamic_data_of (0));
14508 #endif //MULTIPLE_HEAPS
14509             // in gc stress, only escalate every 10th non-gen2 collection to a gen2...
14510             if ((current_gc_count % 10) == 0)
14511             {
14512                 n = max_generation;
14513             }
14514         }
14515         // for traditional GC stress
14516         else
14517 #endif // !FEATURE_REDHAWK
14518         if (*blocking_collection_p)
14519         {
14520             // We call StressHeap() a lot for Concurrent GC Stress. However,
14521             // if we can not do a concurrent collection, no need to stress anymore.
14522             // if we cannot do a concurrent collection, no need to stress anymore.
14523             GCStressPolicy::GlobalDisable();
14524         }
14525         else
14526         {
14527             n = max_generation;
14528         }
14529     }
14530 #endif //BACKGROUND_GC
14531 #endif //STRESS_HEAP
14532
14533     return n;
14534 }
14535
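      // Survived size recorded for one heap: per generation (including LOH), size_after minus
      // free list space and free object space.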
14536 inline
14537 size_t get_survived_size (gc_history_per_heap* hist)
14538 {
14539     size_t surv_size = 0;
14540     gc_generation_data* gen_data;
14541
14542     for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
14543     {
14544         gen_data = &(hist->gen_data[gen_number]); 
14545         surv_size += (gen_data->size_after - 
14546                       gen_data->free_list_space_after - 
14547                       gen_data->free_obj_space_after);
14548     }
14549
14550     return surv_size;
14551 }
14552
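      // Sums get_survived_size over all heaps (single heap for workstation GC).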
14553 size_t gc_heap::get_total_survived_size()
14554 {
14555     size_t total_surv_size = 0;
14556 #ifdef MULTIPLE_HEAPS
14557     for (int i = 0; i < gc_heap::n_heaps; i++)
14558     {
14559         gc_heap* hp = gc_heap::g_heaps[i];
14560         gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
14561         total_surv_size += get_survived_size (current_gc_data_per_heap);
14562     }
14563 #else
14564     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
14565     total_surv_size = get_survived_size (current_gc_data_per_heap);
14566 #endif //MULTIPLE_HEAPS
14567     return total_surv_size;
14568 }
14569
14570 // Gets what's allocated on both SOH and LOH that hasn't been collected.
14571 size_t gc_heap::get_current_allocated()
14572 {
14573     dynamic_data* dd = dynamic_data_of (0);
14574     size_t current_alloc = dd_desired_allocation (dd) - dd_new_allocation (dd);
14575     dd = dynamic_data_of (max_generation + 1);
14576     current_alloc += dd_desired_allocation (dd) - dd_new_allocation (dd);
14577
14578     return current_alloc;
14579 }
14580
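      // Sums get_current_allocated over all heaps (single heap for workstation GC).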
14581 size_t gc_heap::get_total_allocated()
14582 {
14583     size_t total_current_allocated = 0;
14584 #ifdef MULTIPLE_HEAPS
14585     for (int i = 0; i < gc_heap::n_heaps; i++)
14586     {
14587         gc_heap* hp = gc_heap::g_heaps[i];
14588         total_current_allocated += hp->get_current_allocated();
14589     }
14590 #else
14591     total_current_allocated = get_current_allocated();
14592 #endif //MULTIPLE_HEAPS
14593     return total_current_allocated;
14594 }
14595
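      // Size of a generation including what has been allocated into it so far this cycle
      // (desired allocation minus the remaining budget).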
14596 size_t gc_heap::current_generation_size (int gen_number)
14597 {
14598     dynamic_data* dd = dynamic_data_of (gen_number);
14599     size_t gen_size = (dd_current_size (dd) + dd_desired_allocation (dd)
14600                         - dd_new_allocation (dd));
14601
14602     return gen_size;
14603 }
14604
14605 #ifdef _PREFAST_
14606 #pragma warning(push)
14607 #pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
14608 #endif //_PREFAST_
14609
14610 /*
14611     This is called when we are actually doing a GC, or when we are just checking whether
14612     we would do a full blocking GC, in which case check_only_p is TRUE.
14613
14614     The difference between calling this with check_only_p TRUE and FALSE is that when it's
14615     TRUE: 
14616             settings.reason is ignored
14617             budgets are not checked (since they are checked before this is called)
14618             it doesn't change anything non-local like generation_skip_ratio
14619 */
14620 int gc_heap::generation_to_condemn (int n_initial, 
14621                                     BOOL* blocking_collection_p, 
14622                                     BOOL* elevation_requested_p,
14623                                     BOOL check_only_p)
14624 {
14625     gc_mechanisms temp_settings = settings;
14626     gen_to_condemn_tuning temp_condemn_reasons;
14627     gc_mechanisms* local_settings = (check_only_p ? &temp_settings : &settings);
14628     gen_to_condemn_tuning* local_condemn_reasons = (check_only_p ? &temp_condemn_reasons : &gen_to_condemn_reasons);
14629     if (!check_only_p)
14630     {
14631         if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh))
14632         {
14633             assert (n_initial >= 1);
14634         }
14635
14636         assert (settings.reason != reason_empty);
14637     }
14638
14639     local_condemn_reasons->init();
14640
14641     int n = n_initial;
14642     int n_alloc = n;
14643     if (heap_number == 0)
14644     {
14645         dprintf (GTC_LOG, ("init: %d(%d)", n_initial, settings.reason));
14646     }
14647     int i = 0;
14648     int temp_gen = 0;
14649     BOOL low_memory_detected = g_low_memory_status;
14650     uint32_t memory_load = 0;
14651     uint64_t available_physical = 0;
14652     uint64_t available_page_file = 0;
14653     BOOL check_memory = FALSE;
14654     BOOL high_fragmentation  = FALSE;
14655     BOOL v_high_memory_load  = FALSE;
14656     BOOL high_memory_load    = FALSE;
14657     BOOL low_ephemeral_space = FALSE;
14658     BOOL evaluate_elevation  = TRUE;
14659     *elevation_requested_p   = FALSE;
14660     *blocking_collection_p   = FALSE;
14661
14662     BOOL check_max_gen_alloc = TRUE;
14663
14664 #ifdef STRESS_HEAP
14665     int orig_gen = n;
14666 #endif //STRESS_HEAP
14667
14668     if (!check_only_p)
14669     {
14670         dd_fragmentation (dynamic_data_of (0)) = 
14671             generation_free_list_space (youngest_generation) + 
14672             generation_free_obj_space (youngest_generation);
14673
14674         dd_fragmentation (dynamic_data_of (max_generation + 1)) = 
14675             generation_free_list_space (large_object_generation) + 
14676             generation_free_obj_space (large_object_generation);
14677
14678         //save new_allocation
14679         for (i = 0; i <= max_generation+1; i++)
14680         {
14681             dynamic_data* dd = dynamic_data_of (i);
14682             dprintf (GTC_LOG, ("h%d: g%d: l: %Id (%Id)", 
14683                             heap_number, i,
14684                             dd_new_allocation (dd),
14685                             dd_desired_allocation (dd)));
14686             dd_gc_new_allocation (dd) = dd_new_allocation (dd);
14687         }
14688
14689         local_condemn_reasons->set_gen (gen_initial, n);
14690         temp_gen = n;
14691
14692 #ifdef BACKGROUND_GC
14693         if (recursive_gc_sync::background_running_p())
14694         {
14695             dprintf (GTC_LOG, ("bgc in prog, 1"));
14696             check_max_gen_alloc = FALSE;
14697         }
14698 #endif //BACKGROUND_GC
14699
14700         if (check_max_gen_alloc)
14701         {
14702             //figure out if large objects need to be collected.
14703             if (get_new_allocation (max_generation+1) <= 0)
14704             {
14705                 n = max_generation;
14706                 local_condemn_reasons->set_gen (gen_alloc_budget, n);
14707             }
14708         }
14709
14710         //figure out which generation ran out of allocation
14711         for (i = n+1; i <= (check_max_gen_alloc ? max_generation : (max_generation - 1)); i++)
14712         {
14713             if (get_new_allocation (i) <= 0)
14714             {
14715                 n = i;
14716             }
14717             else
14718                 break;
14719         }
14720     }
14721
14722     if (n > temp_gen)
14723     {
14724         local_condemn_reasons->set_gen (gen_alloc_budget, n);
14725     }
14726
14727     dprintf (GTC_LOG, ("h%d: g%d budget", heap_number, ((get_new_allocation (max_generation+1) <= 0) ? 3 : n)));
14728
14729     n_alloc = n;
14730
14731 #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
14732     //time based tuning
14733     // if enough time has elapsed since the last gc
14734     // and the number of gc is too low (1/10 of lower gen) then collect
14735     // This should also be enabled if we have memory concerns
14736     int n_time_max = max_generation;
14737
14738     if (!check_only_p)
14739     {
14740         if (recursive_gc_sync::background_running_p())
14741         {
14742             n_time_max = max_generation - 1;
14743         }
14744     }
14745
14746     if ((local_settings->pause_mode == pause_interactive) ||
14747         (local_settings->pause_mode == pause_sustained_low_latency))
14748     {
14749         dynamic_data* dd0 = dynamic_data_of (0);
14750         size_t now = GetHighPrecisionTimeStamp();
14751         temp_gen = n;
14752         for (i = (temp_gen+1); i <= n_time_max; i++)
14753         {
14754             dynamic_data* dd = dynamic_data_of (i);
14755             if ((now > dd_time_clock(dd) + dd_time_clock_interval(dd)) &&
14756                 (dd_gc_clock (dd0) > (dd_gc_clock (dd) + dd_gc_clock_interval(dd))) &&
14757                 ((n < max_generation) || ((dd_current_size (dd) < dd_max_size (dd0)))))
14758             {
14759                 n = min (i, n_time_max);
14760                 dprintf (GTC_LOG, ("time %d", n));
14761             }
14762         }
14763         if (n > temp_gen)
14764         {
14765             local_condemn_reasons->set_gen (gen_time_tuning, n);
14766         }
14767     }
14768
14769     if (n != n_alloc)
14770     {
14771         dprintf (GTC_LOG, ("Condemning %d based on time tuning and fragmentation", n));
14772     }
14773 #endif //BACKGROUND_GC && !MULTIPLE_HEAPS
14774
14775     if (n < (max_generation - 1))
14776     {
14777         if (dt_low_card_table_efficiency_p (tuning_deciding_condemned_gen))
14778         {
14779             n = max (n, max_generation - 1);
14780             local_settings->promotion = TRUE;
14781             dprintf (GTC_LOG, ("h%d: skip %d, c %d",
14782                         heap_number, generation_skip_ratio, n));
14783             local_condemn_reasons->set_condition (gen_low_card_p);
14784         }
14785     }
14786
14787     if (!check_only_p)
14788     {
14789         generation_skip_ratio = 100;
14790     }
14791
14792     if (dt_low_ephemeral_space_p (check_only_p ? 
14793                                   tuning_deciding_full_gc : 
14794                                   tuning_deciding_condemned_gen))
14795     {
14796         low_ephemeral_space = TRUE;
14797
14798         n = max (n, max_generation - 1);
14799         local_condemn_reasons->set_condition (gen_low_ephemeral_p);
14800         dprintf (GTC_LOG, ("h%d: low eph", heap_number));
14801
14802 #ifdef BACKGROUND_GC
14803         if (!gc_can_use_concurrent || (generation_free_list_space (generation_of (max_generation)) == 0))
14804 #endif //BACKGROUND_GC
14805         {
14806             //It is better to defragment first if we are running out of space for
14807             //the ephemeral generation but we have enough fragmentation to make up for it
14808             //in the non ephemeral generation. Essentially we are trading a gen2 for 
14809             //in the non-ephemeral generation. Essentially we are trading a gen2 GC for
14810             //having to expand the heap in ephemeral collections.
14811                                 max_generation - 1, 
14812                                 TRUE))
14813             {
14814                 high_fragmentation = TRUE;
14815                 local_condemn_reasons->set_condition (gen_max_high_frag_e_p);
14816                 dprintf (GTC_LOG, ("heap%d: gen1 frag", heap_number));
14817             }
14818         }
14819     }
14820
14821     //figure out which ephemeral generation is too fragmented
14822     temp_gen = n;
14823     for (i = n+1; i < max_generation; i++)
14824     {
14825         if (dt_high_frag_p (tuning_deciding_condemned_gen, i))
14826         {
14827             dprintf (GTC_LOG, ("h%d g%d too frag", heap_number, i));
14828             n = i;
14829         }
14830         else
14831             break;
14832     }
14833
14834     if (low_ephemeral_space)
14835     {
14836         //enable promotion
14837         local_settings->promotion = TRUE;
14838     }
14839
14840     if (n > temp_gen)
14841     {
14842         local_condemn_reasons->set_condition (gen_eph_high_frag_p);
14843     }
14844
14845     if (!check_only_p)
14846     {
14847         if (settings.pause_mode == pause_low_latency)
14848         {
14849             if (!is_induced (settings.reason))
14850             {
14851                 n = min (n, max_generation - 1);
14852                 dprintf (GTC_LOG, ("low latency mode is enabled, condemning %d", n));
14853                 evaluate_elevation = FALSE;
14854                 goto exit;
14855             }
14856         }
14857     }
14858
14859     // It's hard to catch the point where the memory load gets so high that we get an
14860     // induced GC from the finalizer thread, so we are checking the memory load
14861     // on every gen0 GC.
14862     check_memory = (check_only_p ? 
14863                     (n >= 0) : 
14864                     ((n >= 1) || low_memory_detected));
14865
14866     if (check_memory)
14867     {
14868         //find out if we are short on memory
14869         get_memory_info (&memory_load, &available_physical, &available_page_file);
14870         if (heap_number == 0)
14871         {
14872             dprintf (GTC_LOG, ("ml: %d", memory_load));
14873         }
14874         
14875         // Need to get it early enough for all heaps to use.
14876         entry_available_physical_mem = available_physical;
14877         local_settings->entry_memory_load = memory_load;
14878
14879         // @TODO: Force compaction more often under GCSTRESS
14880         if (memory_load >= high_memory_load_th || low_memory_detected)
14881         {
14882 #ifdef SIMPLE_DPRINTF
14883             // stress log can't handle any parameter that's bigger than a void*.
14884             if (heap_number == 0)
14885             {
14886                 dprintf (GTC_LOG, ("tp: %I64d, ap: %I64d", total_physical_mem, available_physical));
14887             }
14888 #endif //SIMPLE_DPRINTF
14889
14890             high_memory_load = TRUE;
14891
14892             if (memory_load >= v_high_memory_load_th || low_memory_detected)
14893             {
14894                 // TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since
14895                 // gen1/gen0 may take a lot more memory than gen2.
14896                 if (!high_fragmentation)
14897                 {
14898                     high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation);
14899                 }
14900                 v_high_memory_load = TRUE;
14901             }
14902             else
14903             {
14904                 if (!high_fragmentation)
14905                 {
14906                     high_fragmentation = dt_estimate_high_frag_p (tuning_deciding_condemned_gen, max_generation, available_physical);
14907                 }
14908             }
14909
14910             if (high_fragmentation)
14911             {
14912                 if (high_memory_load)
14913                 {
14914                     local_condemn_reasons->set_condition (gen_max_high_frag_m_p);
14915                 }
14916                 else if (v_high_memory_load)
14917                 {
14918                     local_condemn_reasons->set_condition (gen_max_high_frag_vm_p);
14919                 }
14920             }
14921         }
14922     }
14923
14924     dprintf (GTC_LOG, ("h%d: le: %d, hm: %d, vm: %d, f: %d",
14925                  heap_number, low_ephemeral_space, high_memory_load, v_high_memory_load,
14926                  high_fragmentation));
14927
14928     if (should_expand_in_full_gc)
14929     {
14930         dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number));
14931         *blocking_collection_p = TRUE;
14932         if (!check_only_p)
14933         {
14934             should_expand_in_full_gc = FALSE;
14935         }
14936         evaluate_elevation = FALSE;
14937         n = max_generation;
14938         local_condemn_reasons->set_condition (gen_expand_fullgc_p);
14939     }
14940
14941     if (last_gc_before_oom)
14942     {
14943         dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number));
14944         n = max_generation;
14945         *blocking_collection_p = TRUE;
14946         if ((local_settings->reason == reason_oos_loh) ||
14947             (local_settings->reason == reason_alloc_loh))
14948             evaluate_elevation = FALSE;
14949
14950         local_condemn_reasons->set_condition (gen_before_oom);
14951     }
14952
14953     if (!check_only_p)
14954     {
14955         if (is_induced_blocking (settings.reason) && 
14956             n_initial == max_generation
14957             IN_STRESS_HEAP( && !settings.stress_induced ))
14958         {
14959             if (heap_number == 0)
14960             {
14961                 dprintf (GTC_LOG, ("induced - BLOCK"));
14962             }
14963
14964             *blocking_collection_p = TRUE;
14965             local_condemn_reasons->set_condition (gen_induced_fullgc_p);
14966             evaluate_elevation = FALSE;
14967         }
14968
14969         if (settings.reason == reason_induced_noforce)
14970         {
14971             local_condemn_reasons->set_condition (gen_induced_noforce_p);
14972             evaluate_elevation = FALSE;
14973         }
14974     }
14975
14976     if (evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load))
14977     {
14978         *elevation_requested_p = TRUE;
14979 #ifdef BIT64
14980         // if we are in high memory load and have consumed more than 10% of the gen2 budget, do a gen2 now.
14981         if (high_memory_load || v_high_memory_load)
14982         {
14983             dynamic_data* dd_max = dynamic_data_of (max_generation);
14984             if (((float)dd_new_allocation (dd_max) / (float)dd_desired_allocation (dd_max)) < 0.9)
14985             {
14986                 dprintf (GTC_LOG, ("%Id left in gen2 alloc (%Id)", 
14987                     dd_new_allocation (dd_max), dd_desired_allocation (dd_max)));
14988                 n = max_generation;
14989                 local_condemn_reasons->set_condition (gen_almost_max_alloc);
14990             }
14991         }
14992
14993         if (n <= max_generation)
14994         {
14995 #endif // BIT64
14996             if (high_fragmentation)
14997             {
14998                 //elevate to max_generation
14999                 n = max_generation;
15000                 dprintf (GTC_LOG, ("h%d: f full", heap_number));
15001
15002 #ifdef BACKGROUND_GC
15003                 if (high_memory_load || v_high_memory_load)
15004                 {
15005                     // For background GC we want to do blocking collections more eagerly because we don't
15006                     // want to get into the situation where the memory load becomes high while we are in
15007                     // a background GC and we'd have to wait for the background GC to finish to start
15008                     // a blocking collection (right now the implemenation doesn't handle converting 
15009                     // a blocking collection (right now the implementation doesn't handle converting
15010                     // a background GC to a blocking collection midway).
15011                     *blocking_collection_p = TRUE;
15012                 }
15013 #else
15014                 if (v_high_memory_load)
15015                 {
15016                     dprintf (GTC_LOG, ("h%d: - BLOCK", heap_number));
15017                     *blocking_collection_p = TRUE;
15018                 }
15019 #endif //BACKGROUND_GC
15020             }
15021             else
15022             {
15023                 n = max (n, max_generation - 1);
15024                 dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n));
15025             }
15026 #ifdef BIT64
15027         }
15028 #endif // BIT64
15029     }
15030
15031     if ((n == (max_generation - 1)) && (n_alloc < (max_generation -1)))
15032     {
15033         dprintf (GTC_LOG, ("h%d: budget %d, check 2",
15034                       heap_number, n_alloc));
15035         if (get_new_allocation (max_generation) <= 0)
15036         {
15037             dprintf (GTC_LOG, ("h%d: budget alloc", heap_number));
15038             n = max_generation;
15039             local_condemn_reasons->set_condition (gen_max_gen1);
15040         }
15041     }
15042
15043     //figure out if max_generation is too fragmented -> blocking collection
15044     if (n == max_generation)
15045     {
15046         if (dt_high_frag_p (tuning_deciding_condemned_gen, n))
15047         {
15048             dprintf (GTC_LOG, ("h%d: g%d too frag", heap_number, n));
15049             local_condemn_reasons->set_condition (gen_max_high_frag_p);
15050             if (local_settings->pause_mode != pause_sustained_low_latency)
15051             {
15052                 *blocking_collection_p = TRUE;
15053             }
15054         }
15055     }
15056
15057 #ifdef BACKGROUND_GC
15058     if (n == max_generation)
15059     {
15060         if (heap_number == 0)
15061         {
15062             BOOL bgc_heap_too_small = TRUE;
15063             size_t gen2size = 0;
15064             size_t gen3size = 0;
15065 #ifdef MULTIPLE_HEAPS
15066             for (int i = 0; i < n_heaps; i++)
15067             {
15068                 if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) || 
15069                     ((g_heaps[i]->current_generation_size (max_generation + 1)) > bgc_min_per_heap))
15070                 {
15071                     bgc_heap_too_small = FALSE;
15072                     break;
15073                 }
15074             }
15075 #else //MULTIPLE_HEAPS
15076             if ((current_generation_size (max_generation) > bgc_min_per_heap) || 
15077                 (current_generation_size (max_generation + 1) > bgc_min_per_heap))
15078             {
15079                 bgc_heap_too_small = FALSE;
15080             }            
15081 #endif //MULTIPLE_HEAPS
15082
15083             if (bgc_heap_too_small)
15084             {
15085                 dprintf (GTC_LOG, ("gen2 and gen3 too small"));
15086
15087 #ifdef STRESS_HEAP
15088                 // do not turn stress-induced collections into blocking GCs
15089                 if (!settings.stress_induced)
15090 #endif //STRESS_HEAP
15091                 {
15092                     *blocking_collection_p = TRUE;
15093                 }
15094
15095                 local_condemn_reasons->set_condition (gen_gen2_too_small);
15096             }
15097         }
15098     }
15099 #endif //BACKGROUND_GC
15100
15101 exit:
15102     if (!check_only_p)
15103     {
15104 #ifdef STRESS_HEAP
15105 #ifdef BACKGROUND_GC
15106         // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
15107         // generations to be collected,
15108         // generations to be collected.
15109         if (orig_gen != max_generation &&
15110             g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
15111         {
15112             *elevation_requested_p = FALSE;
15113         }
15114 #endif //BACKGROUND_GC
15115 #endif //STRESS_HEAP
15116
15117         if (check_memory)
15118         {
15119             fgm_result.available_pagefile_mb = (size_t)(available_page_file / (1024 * 1024));
15120         }
15121
15122         local_condemn_reasons->set_gen (gen_final_per_heap, n);
15123         get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons);
15124
15125 #ifdef DT_LOG
15126         local_condemn_reasons->print (heap_number);
15127 #endif //DT_LOG
15128
15129         if ((local_settings->reason == reason_oos_soh) || 
15130             (local_settings->reason == reason_oos_loh))
15131         {
15132             assert (n >= 1);
15133         }
15134     }
15135
15136     if (n == max_generation && GCToEEInterface::ForceFullGCToBeBlocking())
15137     {
15138 #ifdef BACKGROUND_GC
15139         // do not turn stress-induced collections into blocking GCs, unless there
15140         // have already been more full BGCs than full NGCs
15141 #if 0
15142         // This exposes DevDiv 94129, so we'll leave this out for now
15143         if (!settings.stress_induced ||
15144             full_gc_counts[gc_type_blocking] <= full_gc_counts[gc_type_background])
15145 #endif // 0
15146 #endif // BACKGROUND_GC
15147         {
15148             *blocking_collection_p = TRUE;
15149         }
15150     }
15151
15152     return n;
15153 }
15154
15155 #ifdef _PREFAST_
15156 #pragma warning(pop)
15157 #endif //_PREFAST_
15158
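      // Fragmentation threshold for reclaiming via a full GC: the smallest of a cap that shrinks
      // as the memory load rises, 10% of this heap's gen2 size, and 3% of physical memory divided
      // among the heaps.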
15159 inline
15160 size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps)
15161 {
15162     // if the memory load is higher, the threshold we'd want to collect gets lower.
15163     size_t min_mem_based_on_available = 
15164         (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;
15165     size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10);
15166     uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps;
15167
15168 #ifdef SIMPLE_DPRINTF
15169     dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d", 
15170         min_mem_based_on_available, ten_percent_size, three_percent_mem));
15171 #endif //SIMPLE_DPRINTF
15172     return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem)));
15173 }
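// Illustration only: a rough sketch of the threshold math above with purely
// hypothetical numbers (the memory load, gen2 size and machine size below are made up;
// the real values come from settings and the dynamic data).
#if 0
static size_t example_min_reclaim_threshold ()
{
    size_t num_heaps = 4;
    size_t entry_memory_load = 95;                            // assumed current memory load (%)
    size_t high_memory_load_th = 90;                          // assumed high-memory threshold (%)
    size_t gen2_size = (size_t)2048 * 1024 * 1024;            // assumed 2 GB of gen2
    size_t mem_one_percent = (size_t)80 * 1024 * 1024;        // assumed 8 GB machine -> 1% = 80 MB

    size_t min_mem_based_on_available =
        (500 - (entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;  // 75 MB
    size_t ten_percent_size = gen2_size / 10;                                              // ~205 MB
    size_t three_percent_mem = mem_one_percent * 3 / num_heaps;                            // 60 MB
    // min of the three candidates -> 60 MB per heap in this example
    return min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem));
}
#endif //0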
15174
15175 inline
15176 uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps)
15177 {
15178     return min (available_mem, (256*1024*1024)) / num_heaps;
15179 }
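// Illustration: with, say, 1 GB of available memory and 4 heaps, the per-heap
// threshold above is min (1 GB, 256 MB) / 4 = 64 MB (numbers purely hypothetical).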
15180
15181 enum {
15182 CORINFO_EXCEPTION_GC = 0xE0004743 // 'GC'
15183 };
15184
15185
15186 #ifdef BACKGROUND_GC
15187 void gc_heap::init_background_gc ()
15188 {
15189     //reset the allocation so the foreground GC can allocate into the older (max_generation) generation
15190     generation* gen = generation_of (max_generation);
15191     generation_allocation_pointer (gen)= 0;
15192     generation_allocation_limit (gen) = 0;
15193     generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
15194
15195     PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
15196
15197     //reset the plan allocation for each segment
15198     for (heap_segment* seg = generation_allocation_segment (gen); seg != ephemeral_heap_segment;
15199         seg = heap_segment_next_rw (seg))
15200     {
15201         heap_segment_plan_allocated (seg) = heap_segment_allocated (seg);
15202     }
15203
15204     if (heap_number == 0)
15205     {
15206         dprintf (2, ("heap%d: bgc lowest: %Ix, highest: %Ix", 
15207             heap_number,
15208             background_saved_lowest_address, 
15209             background_saved_highest_address));
15210     }
15211
15212     gc_lh_block_event.Reset();
15213 }
15214
15215 #endif //BACKGROUND_GC
15216
15217 inline
15218 void fire_drain_mark_list_event (size_t mark_list_objects)
15219 {
15220     FIRE_EVENT(BGCDrainMark, mark_list_objects);
15221 }
15222
15223 inline
15224 void fire_revisit_event (size_t dirtied_pages, 
15225                          size_t marked_objects,
15226                          BOOL large_objects_p)
15227 {
15228     FIRE_EVENT(BGCRevisit, dirtied_pages, marked_objects, large_objects_p);
15229 }
15230
15231 inline
15232 void fire_overflow_event (uint8_t* overflow_min,
15233                           uint8_t* overflow_max,
15234                           size_t marked_objects, 
15235                           int large_objects_p)
15236 {
15237     FIRE_EVENT(BGCOverflow, (uint64_t)overflow_min, (uint64_t)overflow_max, marked_objects, large_objects_p);
15238 }
15239
15240 void gc_heap::concurrent_print_time_delta (const char* msg)
15241 {
15242 #ifdef TRACE_GC
15243     size_t current_time = GetHighPrecisionTimeStamp();
15244     size_t elapsed_time = current_time - time_bgc_last;
15245     time_bgc_last = current_time;
15246
15247     dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
15248 #else
15249     UNREFERENCED_PARAMETER(msg);
15250 #endif //TRACE_GC
15251 }
15252
15253 void gc_heap::free_list_info (int gen_num, const char* msg)
15254 {
15255     UNREFERENCED_PARAMETER(gen_num);
15256 #if defined (BACKGROUND_GC) && defined (TRACE_GC)
15257     dprintf (3, ("h%d: %s", heap_number, msg));
15258     for (int i = 0; i <= (max_generation + 1); i++)
15259     {
15260         generation* gen = generation_of (i);
15261         if ((generation_allocation_size (gen) == 0) && 
15262             (generation_free_list_space (gen) == 0) && 
15263             (generation_free_obj_space (gen) == 0))
15264         {
15265             // don't print if everything is 0.
15266         }
15267         else
15268         {
15269             dprintf (3, ("h%d: g%d: a-%Id, fl-%Id, fo-%Id",
15270                 heap_number, i, 
15271                 generation_allocation_size (gen), 
15272                 generation_free_list_space (gen), 
15273                 generation_free_obj_space (gen)));
15274         }
15275     }
15276 #else
15277     UNREFERENCED_PARAMETER(msg);
15278 #endif // BACKGROUND_GC && TRACE_GC
15279 }
15280
15281 void gc_heap::update_collection_counts_for_no_gc()
15282 {
15283     assert (settings.pause_mode == pause_no_gc);
15284
15285     settings.condemned_generation = max_generation;
15286 #ifdef MULTIPLE_HEAPS
15287     for (int i = 0; i < n_heaps; i++)
15288         g_heaps[i]->update_collection_counts();
15289 #else //MULTIPLE_HEAPS
15290     update_collection_counts();
15291 #endif //MULTIPLE_HEAPS
15292
15293     full_gc_counts[gc_type_blocking]++;
15294 }
15295
15296 BOOL gc_heap::should_proceed_with_gc()
15297 {
15298     if (gc_heap::settings.pause_mode == pause_no_gc)
15299     {
15300         if (current_no_gc_region_info.started)
15301         {
15302             // The no_gc mode was already in progress, yet we triggered another GC;
15303             // this effectively exits the no_gc mode.
15304             restore_data_for_no_gc();
15305         }
15306         else
15307             return should_proceed_for_no_gc();
15308     }
15309
15310     return TRUE;
15311 }
15312
15313 //internal part of GC used by both the serial and the concurrent versions
15314 void gc_heap::gc1()
15315 {
15316 #ifdef BACKGROUND_GC
15317     assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15318 #endif //BACKGROUND_GC
15319
15320 #ifdef TIME_GC
15321     mark_time = plan_time = reloc_time = compact_time = sweep_time = 0;
15322 #endif //TIME_GC
15323
15324     verify_soh_segment_list();
15325
15326     int n = settings.condemned_generation;
15327
15328     update_collection_counts ();
15329
15330 #ifdef BACKGROUND_GC
15331     bgc_alloc_lock->check();
15332 #endif //BACKGROUND_GC
15333
15334     free_list_info (max_generation, "beginning");
15335
15336     vm_heap->GcCondemnedGeneration = settings.condemned_generation;
15337
15338     assert (g_gc_card_table == card_table);
15339
15340 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
15341     assert (g_gc_card_bundle_table == card_bundle_table);
15342 #endif    
15343
15344     {
15345         if (n == max_generation)
15346         {
15347             gc_low = lowest_address;
15348             gc_high = highest_address;
15349         }
15350         else
15351         {
15352             gc_low = generation_allocation_start (generation_of (n));
15353             gc_high = heap_segment_reserved (ephemeral_heap_segment);
15354         }   
15355 #ifdef BACKGROUND_GC
15356         if (settings.concurrent)
15357         {
15358 #ifdef TRACE_GC
15359             time_bgc_last = GetHighPrecisionTimeStamp();
15360 #endif //TRACE_GC
15361
15362             FIRE_EVENT(BGCBegin);
15363
15364             concurrent_print_time_delta ("BGC");
15365
15366 //#ifdef WRITE_WATCH
15367             //reset_write_watch (FALSE);
15368 //#endif //WRITE_WATCH
15369
15370             concurrent_print_time_delta ("RW");
15371             background_mark_phase();
15372             free_list_info (max_generation, "after mark phase");
15373             
15374             background_sweep();
15375             free_list_info (max_generation, "after sweep phase");
15376         }
15377         else
15378 #endif //BACKGROUND_GC
15379         {
15380             mark_phase (n, FALSE);
15381
15382             GCScan::GcRuntimeStructuresValid (FALSE);
15383             plan_phase (n);
15384             GCScan::GcRuntimeStructuresValid (TRUE);
15385         }
15386     }
15387
15388     size_t end_gc_time = GetHighPrecisionTimeStamp();
15389 //    printf ("generation: %d, elapsed time: %Id\n", n,  end_gc_time - dd_time_clock (dynamic_data_of (0)));
15390
15391     //adjust the allocation size to account for the pinned quantities.
15392     for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++)
15393     {
15394         generation* gn = generation_of (gen_number);
15395         if (settings.compaction)
15396         {
15397             generation_pinned_allocated (gn) += generation_pinned_allocation_compact_size (gn);
15398             generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_compact_size (gn);
15399         }
15400         else
15401         {
15402             generation_pinned_allocated (gn) += generation_pinned_allocation_sweep_size (gn);
15403             generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_sweep_size (gn);
15404         }
15405         generation_pinned_allocation_sweep_size (gn) = 0;
15406         generation_pinned_allocation_compact_size (gn) = 0;
15407     }
15408
15409 #ifdef BACKGROUND_GC
15410     if (settings.concurrent)
15411     {
15412         dynamic_data* dd = dynamic_data_of (n);
15413         dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
15414
15415         free_list_info (max_generation, "after computing new dynamic data");
15416
15417         gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
15418
15419         for (int gen_number = 0; gen_number < max_generation; gen_number++)
15420         {
15421             dprintf (2, ("end of BGC: gen%d new_alloc: %Id", 
15422                          gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
15423             current_gc_data_per_heap->gen_data[gen_number].size_after = generation_size (gen_number);
15424             current_gc_data_per_heap->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
15425             current_gc_data_per_heap->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
15426         }
15427     }
15428     else
15429 #endif //BACKGROUND_GC
15430     {
15431         free_list_info (max_generation, "end");
15432         for (int gen_number = 0; gen_number <= n; gen_number++)
15433         {
15434             dynamic_data* dd = dynamic_data_of (gen_number);
15435             dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
15436             compute_new_dynamic_data (gen_number);
15437         }
15438
15439         if (n != max_generation)
15440         {
15441             int gen_num_for_data = ((n < (max_generation - 1)) ? (n + 1) : (max_generation + 1));
15442             for (int gen_number = (n + 1); gen_number <= gen_num_for_data; gen_number++)
15443             {
15444                 get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number);
15445                 get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
15446                 get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
15447             }
15448         }
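        // Illustration of gen_num_for_data above (with max_generation == 2): when gen0 was
        // condemned we only capture the "after" numbers for gen1; when gen1 was condemned we
        // capture gen2 and gen3 (LOH), since generations older than the condemned one did not
        // go through compute_new_dynamic_data.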
15449
15450         get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (uint32_t)(generation_allocator_efficiency (generation_of (max_generation)) * 100);
15451
15452         free_list_info (max_generation, "after computing new dynamic data");
15453         
15454         if (heap_number == 0)
15455         {
15456             dprintf (GTC_LOG, ("GC#%d(gen%d) took %Idms", 
15457                 dd_collection_count (dynamic_data_of (0)), 
15458                 settings.condemned_generation,
15459                 dd_gc_elapsed_time (dynamic_data_of (0))));
15460         }
15461
15462         for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
15463         {
15464             dprintf (2, ("end of FGC/NGC: gen%d new_alloc: %Id", 
15465                          gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
15466         }
15467     }
15468
15469     if (n < max_generation)
15470     {
15471         compute_promoted_allocation (1 + n);
15472
15473         dynamic_data* dd = dynamic_data_of (1 + n);
15474         size_t new_fragmentation = generation_free_list_space (generation_of (1 + n)) + 
15475                                    generation_free_obj_space (generation_of (1 + n));
15476
15477 #ifdef BACKGROUND_GC
15478         if (current_c_gc_state != c_gc_state_planning)
15479 #endif //BACKGROUND_GC
15480         {
15481             if (settings.promotion)
15482             {
15483                 dd_fragmentation (dd) = new_fragmentation;
15484             }
15485             else
15486             {
15487                 //assert (dd_fragmentation (dd) == new_fragmentation);
15488             }
15489         }
15490     }
15491
15492 #ifdef BACKGROUND_GC
15493     if (!settings.concurrent)
15494 #endif //BACKGROUND_GC
15495     {
15496 #ifndef FEATURE_REDHAWK
15497         // GCToEEInterface::IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
15498         assert(GCToEEInterface::IsGCThread());
15499 #endif // FEATURE_REDHAWK
15500         adjust_ephemeral_limits();
15501     }
15502
15503 #ifdef BACKGROUND_GC
15504     assert (ephemeral_low == generation_allocation_start (generation_of ( max_generation -1)));
15505     assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment));
15506 #endif //BACKGROUND_GC
15507
15508     if (fgn_maxgen_percent)
15509     {
15510         if (settings.condemned_generation == (max_generation - 1))
15511         {
15512             check_for_full_gc (max_generation - 1, 0);
15513         }
15514         else if (settings.condemned_generation == max_generation)
15515         {
15516             if (full_gc_approach_event_set 
15517 #ifdef MULTIPLE_HEAPS
15518                 && (heap_number == 0)
15519 #endif //MULTIPLE_HEAPS
15520                 )
15521             {
15522                 dprintf (2, ("FGN-GC: setting gen2 end event"));
15523
15524                 full_gc_approach_event.Reset();
15525 #ifdef BACKGROUND_GC
15526                 // By definition WaitForFullGCComplete only succeeds if it's a full, *blocking* GC; otherwise we need to return N/A
15527                 fgn_last_gc_was_concurrent = settings.concurrent ? TRUE : FALSE;
15528 #endif //BACKGROUND_GC
15529                 full_gc_end_event.Set();
15530                 full_gc_approach_event_set = false;            
15531             }
15532         }
15533     }
15534
15535 #ifdef BACKGROUND_GC
15536     if (!settings.concurrent)
15537 #endif //BACKGROUND_GC
15538     {
15539         //decide on the next allocation quantum
15540         if (alloc_contexts_used >= 1)
15541         {
15542             allocation_quantum = Align (min ((size_t)CLR_SIZE,
15543                                             (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))),
15544                                             get_alignment_constant(FALSE));
15545             dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum));
15546         }
15547     }
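    // Illustration of the quantum math above (hypothetical numbers): with a gen0 budget
    // (get_new_allocation (0)) of 1 MB and 64 allocation contexts used, the raw quantum is
    // 1 MB / (2 * 64) = 8192 bytes; it is then clamped to at least 1024 bytes, capped at
    // CLR_SIZE and aligned - presumably so a single context cannot consume too much of the
    // gen0 budget at once.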
15548
15549     descr_generations (FALSE);
15550
15551     verify_soh_segment_list();
15552
15553 #ifdef BACKGROUND_GC
15554     add_to_history_per_heap();
15555     if (heap_number == 0)
15556     {
15557         add_to_history();
15558     }
15559 #endif // BACKGROUND_GC
15560
15561 #ifdef GC_STATS
15562     if (GCStatistics::Enabled() && heap_number == 0)
15563         g_GCStatistics.AddGCStats(settings, 
15564             dd_gc_elapsed_time(dynamic_data_of(settings.condemned_generation)));
15565 #endif // GC_STATS
15566
15567 #ifdef TIME_GC
15568     fprintf (stdout, "%d,%d,%d,%d,%d,%d\n",
15569              n, mark_time, plan_time, reloc_time, compact_time, sweep_time);
15570 #endif //TIME_GC
15571
15572 #ifdef BACKGROUND_GC
15573     assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15574 #endif //BACKGROUND_GC
15575
15576 #if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
15577     if (FALSE 
15578 #ifdef VERIFY_HEAP
15579         // Note that right now GCConfig::GetHeapVerifyLevel always returns the same
15580         // value. If we ever allow randomly adjusting this as the process runs,
15581         // we cannot call it this way as joins need to match - we must have the same
15582         // value for all heaps like we do with bgc_heap_walk_for_etw_p.
15583         || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
15584 #endif
15585 #if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)
15586         || (bgc_heap_walk_for_etw_p && settings.concurrent)
15587 #endif
15588         )
15589     {
15590 #ifdef BACKGROUND_GC
15591         bool cooperative_mode = true;
15592
15593         if (settings.concurrent)
15594         {
15595             cooperative_mode = enable_preemptive ();
15596
15597 #ifdef MULTIPLE_HEAPS
15598             bgc_t_join.join(this, gc_join_suspend_ee_verify);
15599             if (bgc_t_join.joined())
15600             {
15601                 bgc_threads_sync_event.Reset();
15602
15603                 dprintf(2, ("Joining BGC threads to suspend EE for verify heap"));
15604                 bgc_t_join.restart();
15605             }
15606             if (heap_number == 0)
15607             {
15608                 suspend_EE();
15609                 bgc_threads_sync_event.Set();
15610             }
15611             else
15612             {
15613                 bgc_threads_sync_event.Wait(INFINITE, FALSE);
15614                 dprintf (2, ("bgc_threads_sync_event is signalled"));
15615             }
15616 #else
15617             suspend_EE();
15618 #endif //MULTIPLE_HEAPS
15619
15620             //fix the allocation area so verify_heap can proceed.
15621             fix_allocation_contexts (FALSE);
15622         }
15623 #endif //BACKGROUND_GC
15624
15625 #ifdef BACKGROUND_GC
15626         assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15627 #ifdef FEATURE_EVENT_TRACE
15628         if (bgc_heap_walk_for_etw_p && settings.concurrent)
15629         {
15630             GCToEEInterface::DiagWalkBGCSurvivors(__this);
15631
15632 #ifdef MULTIPLE_HEAPS
15633             bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
15634             if (bgc_t_join.joined())
15635             {
15636                 bgc_t_join.restart();
15637             }
15638 #endif // MULTIPLE_HEAPS
15639         }
15640 #endif // FEATURE_EVENT_TRACE
15641 #endif //BACKGROUND_GC
15642
15643 #ifdef VERIFY_HEAP
15644         if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
15645             verify_heap (FALSE);
15646 #endif // VERIFY_HEAP
15647
15648 #ifdef BACKGROUND_GC
15649         if (settings.concurrent)
15650         {
15651             repair_allocation_contexts (TRUE);
15652
15653 #ifdef MULTIPLE_HEAPS
15654             bgc_t_join.join(this, gc_join_restart_ee_verify);
15655             if (bgc_t_join.joined())
15656             {
15657                 bgc_threads_sync_event.Reset();
15658
15659                 dprintf(2, ("Joining BGC threads to restart EE after verify heap"));
15660                 bgc_t_join.restart();
15661             }
15662             if (heap_number == 0)
15663             {
15664                 restart_EE();
15665                 bgc_threads_sync_event.Set();
15666             }
15667             else
15668             {
15669                 bgc_threads_sync_event.Wait(INFINITE, FALSE);
15670                 dprintf (2, ("bgc_threads_sync_event is signalled"));
15671             }
15672 #else
15673             restart_EE();
15674 #endif //MULTIPLE_HEAPS
15675
15676             disable_preemptive (cooperative_mode);
15677         }
15678 #endif //BACKGROUND_GC
15679     }
15680 #endif // defined(VERIFY_HEAP) || (defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
15681
15682 #ifdef MULTIPLE_HEAPS
15683     if (!settings.concurrent)
15684     {
15685         gc_t_join.join(this, gc_join_done);
15686         if (gc_t_join.joined ())
15687         {
15688             gc_heap::internal_gc_done = false;
15689
15690             //equalize the new desired size of each generation across the heaps
15691             int limit = settings.condemned_generation;
15692             if (limit == max_generation)
15693             {
15694                 limit = max_generation+1;
15695             }
15696             for (int gen = 0; gen <= limit; gen++)
15697             {
15698                 size_t total_desired = 0;
15699
15700                 for (int i = 0; i < gc_heap::n_heaps; i++)
15701                 {
15702                     gc_heap* hp = gc_heap::g_heaps[i];
15703                     dynamic_data* dd = hp->dynamic_data_of (gen);
15704                     size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
15705                     if (temp_total_desired < total_desired)
15706                     {
15707                         // we overflowed.
15708                         total_desired = (size_t)MAX_PTR;
15709                         break;
15710                     }
15711                     total_desired = temp_total_desired;
15712                 }
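                // Illustration of the overflow check above: desired allocations are unsigned,
                // so if adding a heap's value wraps around (e.g. 0xFFFF0000 + 0x00020000 on a
                // 32-bit size_t yields 0x00010000), the sum comes out smaller than what we
                // started with; in that case total_desired is simply pinned at MAX_PTR.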
15713
15714                 size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps,
15715                                                     get_alignment_constant ((gen != (max_generation+1))));
15716
15717                 if (gen == 0)
15718                 {
15719 #if 1 //subsumed by the linear allocation model 
15720                     // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
15721                     // apply some smoothing.
15722                     static size_t smoothed_desired_per_heap = 0;
15723                     size_t smoothing = 3; // exponential smoothing factor
15724                     if (smoothing  > VolatileLoad(&settings.gc_index))
15725                         smoothing  = VolatileLoad(&settings.gc_index);
15726                     smoothed_desired_per_heap = desired_per_heap / smoothing + ((smoothed_desired_per_heap / smoothing) * (smoothing-1));
15727                     dprintf (1, ("sn = %Id  n = %Id", smoothed_desired_per_heap, desired_per_heap));
15728                     desired_per_heap = Align(smoothed_desired_per_heap, get_alignment_constant (true));
15729 #endif //1
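                    // Illustration of the smoothing above: with the full smoothing factor of 3
                    // this is an exponential moving average,
                    //   smoothed_new = desired / 3 + smoothed_old * 2 / 3
                    // e.g. smoothed_old = 90 MB and desired = 30 MB gives 10 + 60 = 70 MB, so a
                    // one-off drop in the desired allocation only moves the budget a third of the way.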
15730
15731                     // if desired_per_heap is close to min_gc_size, trim it
15732                     // down to min_gc_size to stay in the cache
15733                     gc_heap* hp = gc_heap::g_heaps[0];
15734                     dynamic_data* dd = hp->dynamic_data_of (gen);
15735                     size_t min_gc_size = dd_min_size(dd);
15736                     // if the min GC size is larger than the true on-die cache size, then don't bother
15737                     // limiting the desired size
15738                     if ((min_gc_size <= GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)) &&
15739                         desired_per_heap <= 2*min_gc_size)
15740                     {
15741                         desired_per_heap = min_gc_size;
15742                     }
15743 #ifdef BIT64
15744                     desired_per_heap = joined_youngest_desired (desired_per_heap);
15745                     dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap));
15746 #endif // BIT64
15747
15748                     gc_data_global.final_youngest_desired = desired_per_heap;
15749                 }
15750 #if 1 //subsumed by the linear allocation model 
15751                 if (gen == (max_generation + 1))
15752                 {
15753                     // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
15754                     // apply some smoothing.
15755                     static size_t smoothed_desired_per_heap_loh = 0;
15756                     size_t smoothing = 3; // exponential smoothing factor
15757                     size_t loh_count = dd_collection_count (dynamic_data_of (max_generation));
15758                     if (smoothing  > loh_count)
15759                         smoothing  = loh_count;
15760                     smoothed_desired_per_heap_loh = desired_per_heap / smoothing + ((smoothed_desired_per_heap_loh / smoothing) * (smoothing-1));
15761                     dprintf( 2, ("smoothed_desired_per_heap_loh  = %Id  desired_per_heap = %Id", smoothed_desired_per_heap_loh, desired_per_heap));
15762                     desired_per_heap = Align(smoothed_desired_per_heap_loh, get_alignment_constant (false));
15763                 }
15764 #endif //1
15765                 for (int i = 0; i < gc_heap::n_heaps; i++)
15766                 {
15767                     gc_heap* hp = gc_heap::g_heaps[i];
15768                     dynamic_data* dd = hp->dynamic_data_of (gen);
15769                     dd_desired_allocation (dd) = desired_per_heap;
15770                     dd_gc_new_allocation (dd) = desired_per_heap;
15771                     dd_new_allocation (dd) = desired_per_heap;
15772
15773                     if (gen == 0)
15774                     {
15775                         hp->fgn_last_alloc = desired_per_heap;
15776                     }
15777                 }
15778             }
15779
15780 #ifdef FEATURE_LOH_COMPACTION
15781             BOOL all_heaps_compacted_p = TRUE;
15782 #endif //FEATURE_LOH_COMPACTION
15783             for (int i = 0; i < gc_heap::n_heaps; i++)
15784             {
15785                 gc_heap* hp = gc_heap::g_heaps[i];
15786                 hp->decommit_ephemeral_segment_pages();
15787                 hp->rearrange_large_heap_segments();
15788 #ifdef FEATURE_LOH_COMPACTION
15789                 all_heaps_compacted_p &= hp->loh_compacted_p;
15790 #endif //FEATURE_LOH_COMPACTION
15791             }
15792
15793 #ifdef FEATURE_LOH_COMPACTION
15794             check_loh_compact_mode (all_heaps_compacted_p);
15795 #endif //FEATURE_LOH_COMPACTION
15796
15797             fire_pevents();
15798
15799             gc_t_join.restart();
15800         }
15801         alloc_context_count = 0;
15802         heap_select::mark_heap (heap_number);
15803     }
15804
15805 #else
15806     gc_data_global.final_youngest_desired = 
15807         dd_desired_allocation (dynamic_data_of (0));
15808
15809     check_loh_compact_mode (loh_compacted_p);
15810
15811     decommit_ephemeral_segment_pages();
15812     fire_pevents();
15813
15814     if (!(settings.concurrent))
15815     {
15816         rearrange_large_heap_segments();
15817         do_post_gc();
15818     }
15819
15820 #ifdef BACKGROUND_GC
15821     recover_bgc_settings();
15822 #endif //BACKGROUND_GC
15823 #endif //MULTIPLE_HEAPS
15824 }
15825
15826 void gc_heap::save_data_for_no_gc()
15827 {
15828     current_no_gc_region_info.saved_pause_mode = settings.pause_mode;
15829 #ifdef MULTIPLE_HEAPS
15830     // This is to affect heap balancing. 
15831     for (int i = 0; i < n_heaps; i++)
15832     {
15833         current_no_gc_region_info.saved_gen0_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (0));
15834         dd_min_size (g_heaps[i]->dynamic_data_of (0)) = min_balance_threshold;
15835         current_no_gc_region_info.saved_gen3_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1));
15836         dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = 0;
15837     }
15838 #endif //MULTIPLE_HEAPS
15839 }
15840
15841 void gc_heap::restore_data_for_no_gc()
15842 {
15843     gc_heap::settings.pause_mode = current_no_gc_region_info.saved_pause_mode;
15844 #ifdef MULTIPLE_HEAPS
15845     for (int i = 0; i < n_heaps; i++)
15846     {
15847         dd_min_size (g_heaps[i]->dynamic_data_of (0)) = current_no_gc_region_info.saved_gen0_min_size;
15848         dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = current_no_gc_region_info.saved_gen3_min_size;
15849     }
15850 #endif //MULTIPLE_HEAPS
15851 }
15852
15853 start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size,
15854                                                              BOOL loh_size_known, 
15855                                                              uint64_t loh_size,
15856                                                              BOOL disallow_full_blocking)
15857 {
15858     if (current_no_gc_region_info.started)
15859     {
15860         return start_no_gc_in_progress;
15861     }
15862
15863     start_no_gc_region_status status = start_no_gc_success;
15864
15865     save_data_for_no_gc();
15866     settings.pause_mode = pause_no_gc;
15867     current_no_gc_region_info.start_status = start_no_gc_success;
15868
15869     uint64_t allocation_no_gc_loh = 0;
15870     uint64_t allocation_no_gc_soh = 0;
15871     assert(total_size != 0);
15872     if (loh_size_known)
15873     {
15874         assert(loh_size != 0);
15875         assert(loh_size <= total_size);
15876         allocation_no_gc_loh = loh_size;
15877         allocation_no_gc_soh = total_size - loh_size;
15878     }
15879     else
15880     {
15881         allocation_no_gc_soh = total_size;
15882         allocation_no_gc_loh = total_size;
15883     }
15884
15885     int soh_align_const = get_alignment_constant (TRUE);
15886     size_t max_soh_allocated = soh_segment_size - segment_info_size - eph_gen_starts_size;
15887     size_t size_per_heap = 0;
15888     const double scale_factor = 1.05;
15889
15890     int num_heaps = 1;
15891 #ifdef MULTIPLE_HEAPS
15892     num_heaps = n_heaps;
15893 #endif // MULTIPLE_HEAPS
15894
15895     uint64_t total_allowed_soh_allocation = max_soh_allocated * num_heaps;
15896     // [LOCALGC TODO]
15897     // In theory, the upper limit here is the physical memory of the machine, not
15898     // SIZE_T_MAX. This is not true today because total_physical_mem can be
15899     // larger than SIZE_T_MAX if running in wow64 on a machine with more than
15900     // 4GB of RAM. Once Local GC code divergence is resolved and code is flowing
15901     // more freely between branches, it would be good to clean this up to use
15902     // total_physical_mem instead of SIZE_T_MAX.
15903     assert(total_allowed_soh_allocation <= SIZE_T_MAX);
15904     uint64_t total_allowed_loh_allocation = SIZE_T_MAX;
15905     uint64_t total_allowed_soh_alloc_scaled = allocation_no_gc_soh > 0 ? static_cast<uint64_t>(total_allowed_soh_allocation / scale_factor) : 0;
15906     uint64_t total_allowed_loh_alloc_scaled = allocation_no_gc_loh > 0 ? static_cast<uint64_t>(total_allowed_loh_allocation / scale_factor) : 0;
15907
15908     if (allocation_no_gc_soh > total_allowed_soh_alloc_scaled ||
15909         allocation_no_gc_loh > total_allowed_loh_alloc_scaled)
15910     {
15911         status = start_no_gc_too_large;
15912         goto done;
15913     }
15914
15915     if (allocation_no_gc_soh > 0)
15916     {
15917         allocation_no_gc_soh = static_cast<uint64_t>(allocation_no_gc_soh * scale_factor);
15918         allocation_no_gc_soh = min (allocation_no_gc_soh, total_allowed_soh_alloc_scaled);
15919     }
15920
15921     if (allocation_no_gc_loh > 0)
15922     {
15923         allocation_no_gc_loh = static_cast<uint64_t>(allocation_no_gc_loh * scale_factor);
15924         allocation_no_gc_loh = min (allocation_no_gc_loh, total_allowed_loh_alloc_scaled);
15925     }
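    // Illustration of the sizing above (hypothetical numbers): a request of total_size = 100 MB
    // with a known loh_size of 20 MB splits into 80 MB SOH + 20 MB LOH; each part is rejected as
    // start_no_gc_too_large if it exceeds the corresponding allowance divided by 1.05, and is
    // otherwise padded by the same 5% (84 MB / 21 MB here), presumably to leave slack for
    // alignment and allocation overhead.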
15926
15927     if (disallow_full_blocking)
15928         current_no_gc_region_info.minimal_gc_p = TRUE;
15929
15930     if (allocation_no_gc_soh != 0)
15931     {
15932         current_no_gc_region_info.soh_allocation_size = static_cast<size_t>(allocation_no_gc_soh);
15933         size_per_heap = current_no_gc_region_info.soh_allocation_size;
15934 #ifdef MULTIPLE_HEAPS
15935         size_per_heap /= n_heaps;
15936         for (int i = 0; i < n_heaps; i++)
15937         {
15938             // due to heap balancing we need to allow some room before we even look to balance to another heap.
15939             g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated);
15940         }
15941 #else //MULTIPLE_HEAPS
15942         soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated);
15943 #endif //MULTIPLE_HEAPS
15944     }
15945
15946     if (allocation_no_gc_loh != 0)
15947     {
15948         current_no_gc_region_info.loh_allocation_size = static_cast<size_t>(allocation_no_gc_loh);
15949         size_per_heap = current_no_gc_region_info.loh_allocation_size;
15950 #ifdef MULTIPLE_HEAPS
15951         size_per_heap /= n_heaps;
15952         for (int i = 0; i < n_heaps; i++)
15953             g_heaps[i]->loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
15954 #else //MULTIPLE_HEAPS
15955         loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
15956 #endif //MULTIPLE_HEAPS
15957     }
15958
15959 done:
15960     if (status != start_no_gc_success)
15961         restore_data_for_no_gc();
15962     return status;
15963 }
15964
15965 void gc_heap::handle_failure_for_no_gc()
15966 {
15967     gc_heap::restore_data_for_no_gc();
15968     // sets current_no_gc_region_info.started to FALSE here.
15969     memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
15970 }
15971
15972 start_no_gc_region_status gc_heap::get_start_no_gc_region_status()
15973 {
15974     return current_no_gc_region_info.start_status;
15975 }
15976
15977 void gc_heap::record_gcs_during_no_gc()
15978 {
15979     if (current_no_gc_region_info.started)
15980     {
15981         current_no_gc_region_info.num_gcs++;
15982         if (is_induced (settings.reason))
15983             current_no_gc_region_info.num_gcs_induced++;
15984     }
15985 }
15986
15987 BOOL gc_heap::find_loh_free_for_no_gc()
15988 {
15989     allocator* loh_allocator = generation_allocator (generation_of (max_generation + 1));
15990     size_t sz_list = loh_allocator->first_bucket_size();
15991     size_t size = loh_allocation_no_gc;
15992     for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
15993     {
15994         if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
15995         {
15996             uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
15997             while (free_list)
15998             {
15999                 size_t free_list_size = unused_array_size(free_list);
16000
16001                 if (free_list_size > loh_allocation_no_gc)
16002                 {
16003                     dprintf (3, ("free item %Ix(%Id) for no gc", (size_t)free_list, free_list_size));
16004                     return TRUE;
16005                 }
16006
16007                 free_list = free_list_slot (free_list); 
16008             }
16009         }
16010         sz_list = sz_list * 2;
16011     }
16012
16013     return FALSE;
16014 }
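// Note on the scan above: the free-list bucket boundaries double starting from
// first_bucket_size(), so buckets whose size class is still below the requested
// size are skipped; only buckets with size < sz_list (plus the last, unbounded
// bucket) are walked, and each candidate item's actual size is still compared
// against loh_allocation_no_gc.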
16015
16016 BOOL gc_heap::find_loh_space_for_no_gc()
16017 {
16018     saved_loh_segment_no_gc = 0;
16019
16020     if (find_loh_free_for_no_gc())
16021         return TRUE;
16022
16023     heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16024
16025     while (seg)
16026     {
16027         size_t remaining = heap_segment_reserved (seg) - heap_segment_allocated (seg);
16028         if (remaining >= loh_allocation_no_gc)
16029         {
16030             saved_loh_segment_no_gc = seg;
16031             break;
16032         }
16033         seg = heap_segment_next (seg);
16034     }
16035
16036     if (!saved_loh_segment_no_gc && current_no_gc_region_info.minimal_gc_p)
16037     {
16038         // If no full GC is allowed, we try to get a new seg right away.
16039         saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc)
16040 #ifdef MULTIPLE_HEAPS
16041                                                       , this
16042 #endif //MULTIPLE_HEAPS
16043                                                       );
16044     }
16045
16046     return (saved_loh_segment_no_gc != 0);
16047 }
16048
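// Note: the loop below answers "did the no-GC prep actually acquire a brand new LOH
// segment?" - it returns TRUE only when saved_loh_segment_no_gc is not yet threaded
// into the LOH generation's segment list, which is the case when the segment was
// freshly obtained rather than being an existing one we merely committed into.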
16049 BOOL gc_heap::loh_allocated_for_no_gc()
16050 {
16051     if (!saved_loh_segment_no_gc)
16052         return FALSE;
16053
16054     heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16055     do 
16056     {
16057         if (seg == saved_loh_segment_no_gc)
16058         {
16059             return FALSE;
16060         }
16061         seg = heap_segment_next (seg);
16062     } while (seg);
16063
16064     return TRUE;
16065 }
16066
16067 BOOL gc_heap::commit_loh_for_no_gc (heap_segment* seg)
16068 {
16069     uint8_t* end_committed = heap_segment_allocated (seg) + loh_allocation_no_gc;
16070     assert (end_committed <= heap_segment_reserved (seg));
16071     return (grow_heap_segment (seg, end_committed));
16072 }
16073
16074 void gc_heap::thread_no_gc_loh_segments()
16075 {
16076 #ifdef MULTIPLE_HEAPS
16077     for (int i = 0; i < n_heaps; i++)
16078     {
16079         gc_heap* hp = g_heaps[i];
16080         if (hp->loh_allocated_for_no_gc())
16081         {
16082             hp->thread_loh_segment (hp->saved_loh_segment_no_gc);
16083             hp->saved_loh_segment_no_gc = 0;
16084         }
16085     }
16086 #else //MULTIPLE_HEAPS
16087     if (loh_allocated_for_no_gc())
16088     {
16089         thread_loh_segment (saved_loh_segment_no_gc);
16090         saved_loh_segment_no_gc = 0;
16091     }
16092 #endif //MULTIPLE_HEAPS    
16093 }
16094
16095 void gc_heap::set_loh_allocations_for_no_gc()
16096 {
16097     if (current_no_gc_region_info.loh_allocation_size != 0)
16098     {
16099         dynamic_data* dd = dynamic_data_of (max_generation + 1);
16100         dd_new_allocation (dd) = loh_allocation_no_gc;
16101         dd_gc_new_allocation (dd) = dd_new_allocation (dd);
16102     }
16103 }
16104
16105 void gc_heap::set_soh_allocations_for_no_gc()
16106 {
16107     if (current_no_gc_region_info.soh_allocation_size != 0)
16108     {
16109         dynamic_data* dd = dynamic_data_of (0);
16110         dd_new_allocation (dd) = soh_allocation_no_gc;
16111         dd_gc_new_allocation (dd) = dd_new_allocation (dd);
16112 #ifdef MULTIPLE_HEAPS
16113         alloc_context_count = 0;
16114 #endif //MULTIPLE_HEAPS
16115     }
16116 }
16117
16118 void gc_heap::set_allocations_for_no_gc()
16119 {
16120 #ifdef MULTIPLE_HEAPS
16121     for (int i = 0; i < n_heaps; i++)
16122     {
16123         gc_heap* hp = g_heaps[i];
16124         hp->set_loh_allocations_for_no_gc();
16125         hp->set_soh_allocations_for_no_gc();
16126     }
16127 #else //MULTIPLE_HEAPS
16128     set_loh_allocations_for_no_gc();
16129     set_soh_allocations_for_no_gc();
16130 #endif //MULTIPLE_HEAPS
16131 }
16132
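// Note on the return value of the function below: TRUE means a GC is still needed to
// satisfy the requested no-GC budgets; FALSE means either the budgets were satisfied
// without a GC (the region is marked as started) or starting failed, in which case
// current_no_gc_region_info.start_status records why.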
16133 BOOL gc_heap::should_proceed_for_no_gc()
16134 {
16135     BOOL gc_requested = FALSE;
16136     BOOL loh_full_gc_requested = FALSE;
16137     BOOL soh_full_gc_requested = FALSE;
16138     BOOL no_gc_requested = FALSE;
16139     BOOL get_new_loh_segments = FALSE;
16140
16141     if (current_no_gc_region_info.soh_allocation_size)
16142     {
16143 #ifdef MULTIPLE_HEAPS
16144         for (int i = 0; i < n_heaps; i++)
16145         {
16146             gc_heap* hp = g_heaps[i];
16147             if ((size_t)(heap_segment_reserved (hp->ephemeral_heap_segment) - hp->alloc_allocated) < hp->soh_allocation_no_gc)
16148             {
16149                 gc_requested = TRUE;
16150                 break;
16151             }
16152         }
16153 #else //MULTIPLE_HEAPS
16154         if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated) < soh_allocation_no_gc)
16155             gc_requested = TRUE;
16156 #endif //MULTIPLE_HEAPS
16157
16158         if (!gc_requested)
16159         {
16160 #ifdef MULTIPLE_HEAPS
16161             for (int i = 0; i < n_heaps; i++)
16162             {
16163                 gc_heap* hp = g_heaps[i];
16164                 if (!(hp->grow_heap_segment (hp->ephemeral_heap_segment, (hp->alloc_allocated + hp->soh_allocation_no_gc))))
16165                 {
16166                     soh_full_gc_requested = TRUE;
16167                     break;
16168                 }
16169             }
16170 #else //MULTIPLE_HEAPS
16171             if (!grow_heap_segment (ephemeral_heap_segment, (alloc_allocated + soh_allocation_no_gc)))
16172                 soh_full_gc_requested = TRUE;
16173 #endif //MULTIPLE_HEAPS
16174         }
16175     }
16176
16177     if (!current_no_gc_region_info.minimal_gc_p && gc_requested)
16178     {
16179         soh_full_gc_requested = TRUE;
16180     }
16181
16182     no_gc_requested = !(soh_full_gc_requested || gc_requested);
16183
16184     if (soh_full_gc_requested && current_no_gc_region_info.minimal_gc_p)
16185     {
16186         current_no_gc_region_info.start_status = start_no_gc_no_memory;
16187         goto done;
16188     }
16189
16190     if (!soh_full_gc_requested && current_no_gc_region_info.loh_allocation_size)
16191     {
16192         // Check to see if we have enough reserved space. 
16193 #ifdef MULTIPLE_HEAPS
16194         for (int i = 0; i < n_heaps; i++)
16195         {
16196             gc_heap* hp = g_heaps[i];
16197             if (!hp->find_loh_space_for_no_gc())
16198             {
16199                 loh_full_gc_requested = TRUE;
16200                 break;
16201             }
16202         }
16203 #else //MULTIPLE_HEAPS
16204         if (!find_loh_space_for_no_gc())
16205             loh_full_gc_requested = TRUE;
16206 #endif //MULTIPLE_HEAPS
16207
16208         // Check to see if we have committed space.
16209         if (!loh_full_gc_requested)
16210         {
16211 #ifdef MULTIPLE_HEAPS
16212             for (int i = 0; i < n_heaps; i++)
16213             {
16214                 gc_heap* hp = g_heaps[i];
16215                 if (hp->saved_loh_segment_no_gc && !hp->commit_loh_for_no_gc (hp->saved_loh_segment_no_gc))
16216                 {
16217                     loh_full_gc_requested = TRUE;
16218                     break;
16219                 }
16220             }
16221 #else //MULTIPLE_HEAPS
16222             if (saved_loh_segment_no_gc && !commit_loh_for_no_gc (saved_loh_segment_no_gc))
16223                 loh_full_gc_requested = TRUE;
16224 #endif //MULTIPLE_HEAPS
16225         }
16226     }
16227
16228     if (loh_full_gc_requested || soh_full_gc_requested)
16229     {
16230         if (current_no_gc_region_info.minimal_gc_p)
16231             current_no_gc_region_info.start_status = start_no_gc_no_memory;
16232     }
16233
16234     no_gc_requested = !(loh_full_gc_requested || soh_full_gc_requested || gc_requested);
16235
16236     if (current_no_gc_region_info.start_status == start_no_gc_success)
16237     {
16238         if (no_gc_requested)
16239             set_allocations_for_no_gc();
16240     }
16241
16242 done:
16243
16244     if ((current_no_gc_region_info.start_status == start_no_gc_success) && !no_gc_requested)
16245         return TRUE;
16246     else
16247     {
16248         // We are done with starting the no_gc_region.
16249         current_no_gc_region_info.started = TRUE;
16250         return FALSE;
16251     }
16252 }
16253
16254 end_no_gc_region_status gc_heap::end_no_gc_region()
16255 {
16256     dprintf (1, ("end no gc called"));
16257
16258     end_no_gc_region_status status = end_no_gc_success;
16259
16260     if (!(current_no_gc_region_info.started))
16261         status = end_no_gc_not_in_progress;
16262     if (current_no_gc_region_info.num_gcs_induced)
16263         status = end_no_gc_induced;
16264     else if (current_no_gc_region_info.num_gcs)
16265         status = end_no_gc_alloc_exceeded;
16266
16267     if (settings.pause_mode == pause_no_gc)
16268         restore_data_for_no_gc();
16269
16270     // sets current_no_gc_region_info.started to FALSE here.
16271     memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
16272
16273     return status;
16274 }
16275
16276 //update counters
16277 void gc_heap::update_collection_counts ()
16278 {
16279     dynamic_data* dd0 = dynamic_data_of (0);
16280     dd_gc_clock (dd0) += 1;
16281
16282     size_t now = GetHighPrecisionTimeStamp();
16283
16284     for (int i = 0; i <= settings.condemned_generation;i++)
16285     {
16286         dynamic_data* dd = dynamic_data_of (i);
16287         dd_collection_count (dd)++;
16288         //this is needed by the linear allocation model
16289         if (i == max_generation)
16290             dd_collection_count (dynamic_data_of (max_generation+1))++;
16291         dd_gc_clock (dd) = dd_gc_clock (dd0);
16292         dd_time_clock (dd) = now;
16293     }
16294 }
16295
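// Note on the "minimal GC" below: if the current ephemeral segment cannot reserve the
// requested no-GC budget, a new segment is acquired and, in effect, everything on the
// old ephemeral segment is promoted into max_generation (its bricks are cleared and
// the old segment becomes a gen2 segment), while fresh generation starts are planted
// at the beginning of the new segment; the promoted size is charged against the gen2
// budget.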
16296 BOOL gc_heap::expand_soh_with_minimal_gc()
16297 {
16298     if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) >= soh_allocation_no_gc)
16299         return TRUE;
16300
16301     heap_segment* new_seg = soh_get_segment_to_expand();
16302     if (new_seg)
16303     {
16304         if (g_gc_card_table != card_table)
16305             copy_brick_card_table();
16306
16307         settings.promotion = TRUE;
16308         settings.demotion = FALSE;
16309         ephemeral_promotion = TRUE;
16310         int condemned_gen_number = max_generation - 1;
16311
16312         generation* gen = 0;
16313         int align_const = get_alignment_constant (TRUE);
16314
16315         for (int i = 0; i <= condemned_gen_number; i++)
16316         {
16317             gen = generation_of (i);
16318             saved_ephemeral_plan_start[i] = generation_allocation_start (gen);
16319             saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const);
16320         }
16321
16322         // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2
16323         // and need to make sure that there are no left over bricks from the previous GCs for the space 
16324         // we just used for gen0 allocation. We will need to go through the bricks for these objects for 
16325         // ephemeral GCs later.
16326         for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
16327              b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment)));
16328              b++)
16329         {
16330             set_brick (b, -1);
16331         }
16332
16333         size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) - 
16334                                 generation_allocation_start (generation_of (max_generation - 1)));
16335         heap_segment_next (ephemeral_heap_segment) = new_seg;
16336         ephemeral_heap_segment = new_seg;
16337         uint8_t*  start = heap_segment_mem (ephemeral_heap_segment);
16338
16339         for (int i = condemned_gen_number; i >= 0; i--)
16340         {
16341             gen = generation_of (i);
16342             size_t gen_start_size = Align (min_obj_size);
16343             make_generation (generation_table[i], ephemeral_heap_segment, start, 0);
16344             generation_plan_allocation_start (gen) = start;
16345             generation_plan_allocation_start_size (gen) = gen_start_size;
16346             start += gen_start_size;
16347         }
16348         heap_segment_used (ephemeral_heap_segment) = start - plug_skew;
16349         heap_segment_plan_allocated (ephemeral_heap_segment) = start;
16350
16351         fix_generation_bounds (condemned_gen_number, generation_of (0));
16352
16353         dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size;
16354         dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation));
16355
16356         adjust_ephemeral_limits();
16357         return TRUE;
16358     }
16359     else
16360         return FALSE;
16361 }
16362
16363 // Only to be done on the thread that calls restart in a join for server GC;
16364 // also resets the OOM status on each heap.
16365 void gc_heap::check_and_set_no_gc_oom()
16366 {
16367 #ifdef MULTIPLE_HEAPS
16368     for (int i = 0; i < n_heaps; i++)
16369     {
16370         gc_heap* hp = g_heaps[i];
16371         if (hp->no_gc_oom_p)
16372         {
16373             current_no_gc_region_info.start_status = start_no_gc_no_memory;
16374             hp->no_gc_oom_p = false;
16375         }
16376     }
16377 #else
16378     if (no_gc_oom_p)
16379     {
16380         current_no_gc_region_info.start_status = start_no_gc_no_memory;
16381         no_gc_oom_p = false;
16382     }
16383 #endif //MULTIPLE_HEAPS
16384 }
16385
16386 void gc_heap::allocate_for_no_gc_after_gc()
16387 {
16388     if (current_no_gc_region_info.minimal_gc_p)
16389         repair_allocation_contexts (TRUE);
16390
16391     no_gc_oom_p = false;
16392
16393     if (current_no_gc_region_info.start_status != start_no_gc_no_memory)
16394     {
16395         if (current_no_gc_region_info.soh_allocation_size != 0)
16396         {
16397             if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) ||
16398                 (!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc))))
16399             {
16400                 no_gc_oom_p = true;
16401             }
16402
16403 #ifdef MULTIPLE_HEAPS
16404             gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
16405             if (gc_t_join.joined())
16406             {
16407 #endif //MULTIPLE_HEAPS
16408
16409                 check_and_set_no_gc_oom();
16410
16411 #ifdef MULTIPLE_HEAPS
16412                 gc_t_join.restart();
16413             }
16414 #endif //MULTIPLE_HEAPS
16415         }
16416
16417         if ((current_no_gc_region_info.start_status == start_no_gc_success) &&
16418             !(current_no_gc_region_info.minimal_gc_p) && 
16419             (current_no_gc_region_info.loh_allocation_size != 0))
16420         {
16421             gc_policy = policy_compact;
16422             saved_loh_segment_no_gc = 0;
16423
16424             if (!find_loh_free_for_no_gc())
16425             {
16426                 heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16427                 BOOL found_seg_p = FALSE;
16428                 while (seg)
16429                 {
16430                     if ((size_t)(heap_segment_reserved (seg) - heap_segment_allocated (seg)) >= loh_allocation_no_gc)
16431                     {
16432                         found_seg_p = TRUE;
16433                         if (!commit_loh_for_no_gc (seg))
16434                         {
16435                             no_gc_oom_p = true;
16436                             break;
16437                         }
16438                     }
16439                     seg = heap_segment_next (seg);
16440                 }
16441
16442                 if (!found_seg_p)
16443                     gc_policy = policy_expand;
16444             }
16445
16446 #ifdef MULTIPLE_HEAPS
16447             gc_t_join.join(this, gc_join_expand_loh_no_gc);
16448             if (gc_t_join.joined())
16449             {
16450                 check_and_set_no_gc_oom();
16451
16452                 if (current_no_gc_region_info.start_status == start_no_gc_success)
16453                 {
16454                     for (int i = 0; i < n_heaps; i++)
16455                     {
16456                         gc_heap* hp = g_heaps[i];
16457                         if (hp->gc_policy == policy_expand)
16458                         {
16459                             hp->saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc), hp);
16460                             if (!(hp->saved_loh_segment_no_gc))
16461                             {
16462                                 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16463                                 break;
16464                             }
16465                         }
16466                     }
16467                 }
16468
16469                 gc_t_join.restart();
16470             }
16471 #else //MULTIPLE_HEAPS
16472             check_and_set_no_gc_oom();
16473
16474             if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand))
16475             {
16476                 saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc));
16477                 if (!saved_loh_segment_no_gc)
16478                     current_no_gc_region_info.start_status = start_no_gc_no_memory;
16479             }
16480 #endif //MULTIPLE_HEAPS
16481
16482             if ((current_no_gc_region_info.start_status == start_no_gc_success) && saved_loh_segment_no_gc)
16483             {
16484                 if (!commit_loh_for_no_gc (saved_loh_segment_no_gc))
16485                 {
16486                     no_gc_oom_p = true;
16487                 }
16488             }
16489         }
16490     }
16491
16492 #ifdef MULTIPLE_HEAPS
16493     gc_t_join.join(this, gc_join_final_no_gc);
16494     if (gc_t_join.joined())
16495     {
16496 #endif //MULTIPLE_HEAPS
16497
16498         check_and_set_no_gc_oom();
16499
16500         if (current_no_gc_region_info.start_status == start_no_gc_success)
16501         {
16502             set_allocations_for_no_gc();
16503             current_no_gc_region_info.started = TRUE;
16504         }
16505
16506 #ifdef MULTIPLE_HEAPS
16507         gc_t_join.restart();
16508     }
16509 #endif //MULTIPLE_HEAPS
16510 }
16511
16512 void gc_heap::init_records()
16513 {
16514     memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap));
16515     gc_data_per_heap.heap_index = heap_number;
16516     if (heap_number == 0)
16517         memset (&gc_data_global, 0, sizeof (gc_data_global));
16518
16519 #ifdef GC_CONFIG_DRIVEN
16520     memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc));
16521 #endif //GC_CONFIG_DRIVEN
16522 }
16523
16524 int gc_heap::garbage_collect (int n)
16525 {
16526     //reset the number of alloc contexts
16527     alloc_contexts_used = 0;
16528
16529     fix_allocation_contexts (TRUE);
16530 #ifdef MULTIPLE_HEAPS
16531 #ifdef JOIN_STATS
16532     gc_t_join.start_ts(this);
16533 #endif //JOIN_STATS
16534     clear_gen0_bricks();
16535 #endif //MULTIPLE_HEAPS
16536
16537     if ((settings.pause_mode == pause_no_gc) && current_no_gc_region_info.minimal_gc_p)
16538     {
16539 #ifdef MULTIPLE_HEAPS
16540         gc_t_join.join(this, gc_join_minimal_gc);
16541         if (gc_t_join.joined())
16542         {
16543 #endif //MULTIPLE_HEAPS
16544
16545 #ifdef MULTIPLE_HEAPS
16546             // this is serialized because we need to get a segment
16547             for (int i = 0; i < n_heaps; i++)
16548             {
16549                 if (!(g_heaps[i]->expand_soh_with_minimal_gc()))
16550                     current_no_gc_region_info.start_status = start_no_gc_no_memory;
16551             }
16552 #else
16553             if (!expand_soh_with_minimal_gc())
16554                 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16555 #endif //MULTIPLE_HEAPS
16556
16557             update_collection_counts_for_no_gc();
16558
16559 #ifdef MULTIPLE_HEAPS
16560             gc_t_join.restart();
16561         }
16562 #endif //MULTIPLE_HEAPS
16563
16564         goto done;
16565     }
16566
16567     init_records();
16568     memset (&fgm_result, 0, sizeof (fgm_result));
16569
16570     settings.reason = gc_trigger_reason;
16571     verify_pinned_queue_p = FALSE;
16572
16573 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
16574         num_pinned_objects = 0;
16575 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
16576
16577 #ifdef STRESS_HEAP
16578     if (settings.reason == reason_gcstress)
16579     {
16580         settings.reason = reason_induced;
16581         settings.stress_induced = TRUE;
16582     }
16583 #endif // STRESS_HEAP
16584
16585 #ifdef MULTIPLE_HEAPS
16586     //align all heaps on the max generation to condemn
16587     dprintf (3, ("Joining for max generation to condemn"));
16588     condemned_generation_num = generation_to_condemn (n, 
16589                                                       &blocking_collection, 
16590                                                       &elevation_requested, 
16591                                                       FALSE);
16592     gc_t_join.join(this, gc_join_generation_determined);
16593     if (gc_t_join.joined())
16594 #endif //MULTIPLE_HEAPS
16595     {
16596 #ifdef MULTIPLE_HEAPS
16597 #if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
16598         //delete old slots from the segment table
16599         seg_table->delete_old_slots();
16600 #endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE
16601         for (int i = 0; i < n_heaps; i++)
16602         {
16603             //copy the card and brick tables
16604             if (g_gc_card_table != g_heaps[i]->card_table)
16605             {
16606                 g_heaps[i]->copy_brick_card_table();
16607             }
16608
16609             g_heaps[i]->rearrange_large_heap_segments();
16610             if (!recursive_gc_sync::background_running_p())
16611             {
16612                 g_heaps[i]->rearrange_small_heap_segments();
16613             }
16614         }
16615 #else //MULTIPLE_HEAPS
16616 #ifdef BACKGROUND_GC
16617             //delete old slots from the segment table
16618 #if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
16619             seg_table->delete_old_slots();
16620 #endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE
16621             rearrange_large_heap_segments();
16622             if (!recursive_gc_sync::background_running_p())
16623             {
16624                 rearrange_small_heap_segments();
16625             }
16626 #endif //BACKGROUND_GC
16627         // check for card table growth
16628         if (g_gc_card_table != card_table)
16629             copy_brick_card_table();
16630
16631 #endif //MULTIPLE_HEAPS
16632
16633         BOOL should_evaluate_elevation = FALSE;
16634         BOOL should_do_blocking_collection = FALSE;
16635
16636 #ifdef MULTIPLE_HEAPS
16637         int gen_max = condemned_generation_num;
16638         for (int i = 0; i < n_heaps; i++)
16639         {
16640             if (gen_max < g_heaps[i]->condemned_generation_num)
16641                 gen_max = g_heaps[i]->condemned_generation_num;
16642             if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
16643                 should_evaluate_elevation = TRUE;
16644             if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
16645                 should_do_blocking_collection = TRUE;
16646         }
16647
16648         settings.condemned_generation = gen_max;
16649 #else //MULTIPLE_HEAPS
16650         settings.condemned_generation = generation_to_condemn (n, 
16651                                                             &blocking_collection, 
16652                                                             &elevation_requested, 
16653                                                             FALSE);
16654         should_evaluate_elevation = elevation_requested;
16655         should_do_blocking_collection = blocking_collection;
16656 #endif //MULTIPLE_HEAPS
16657
16658         settings.condemned_generation = joined_generation_to_condemn (
16659                                             should_evaluate_elevation, 
16660                                             settings.condemned_generation,
16661                                             &should_do_blocking_collection
16662                                             STRESS_HEAP_ARG(n)
16663                                             );
16664
16665         STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10, 
16666                 "condemned generation num: %d\n", settings.condemned_generation);
16667
16668         record_gcs_during_no_gc();
16669
16670         if (settings.condemned_generation > 1)
16671             settings.promotion = TRUE;
16672
16673 #ifdef HEAP_ANALYZE
16674         // At this point we've decided what generation is condemned
16675         // See if we've been requested to analyze survivors after the mark phase
16676         if (GCToEEInterface::AnalyzeSurvivorsRequested(settings.condemned_generation))
16677         {
16678             heap_analyze_enabled = TRUE;
16679         }
16680 #endif // HEAP_ANALYZE
16681
16682         GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);
16683
16684 #ifdef BACKGROUND_GC
16685         if ((settings.condemned_generation == max_generation) &&
16686             (recursive_gc_sync::background_running_p()))
16687         {
16688             //TODO BACKGROUND_GC If we just wait for the end of the background GC, it won't work
16689             // because we have to collect gen0 and gen1 properly;
16690             // in particular, the allocation contexts are gone.
16691             // For now, it is simpler to collect max_generation-1.
16692             settings.condemned_generation = max_generation - 1;
16693             dprintf (GTC_LOG, ("bgc - 1 instead of 2"));
16694         }
16695
16696         if ((settings.condemned_generation == max_generation) &&
16697             (should_do_blocking_collection == FALSE) &&
16698             gc_can_use_concurrent &&
16699             !temp_disable_concurrent_p &&                 
16700             ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)))
16701         {
16702             keep_bgc_threads_p = TRUE;
16703             c_write (settings.concurrent,  TRUE);
16704         }
16705 #endif //BACKGROUND_GC
16706
16707         settings.gc_index = (uint32_t)dd_collection_count (dynamic_data_of (0)) + 1;
16708
16709         // Call the EE for start of GC work
16710         // just one thread for MP GC
16711         GCToEEInterface::GcStartWork (settings.condemned_generation,
16712                                  max_generation);            
16713
16714         // TODO: we could fire an ETW event here announcing this GC as a concurrent GC, but if we later fail
16715         // to create the BGC threads (or similar) it could end up being a non concurrent GC. Maybe for concurrent
16716         // GC we should fire it in do_background_gc, and if it failed to be a CGC we fire it in gc1... in other
16717         // words, it should be fired in gc1.
16718         do_pre_gc();
16719
16720 #ifdef MULTIPLE_HEAPS
16721         gc_start_event.Reset();
16722         //start all threads on the roots.
16723         dprintf(3, ("Starting all gc threads for gc"));
16724         gc_t_join.restart();
16725 #endif //MULTIPLE_HEAPS
16726     }
16727
16728     {
16729         int gen_num_for_data = max_generation + 1;
16730         for (int i = 0; i <= gen_num_for_data; i++)
16731         {
16732             gc_data_per_heap.gen_data[i].size_before = generation_size (i);
16733             generation* gen = generation_of (i);
16734             gc_data_per_heap.gen_data[i].free_list_space_before = generation_free_list_space (gen);
16735             gc_data_per_heap.gen_data[i].free_obj_space_before = generation_free_obj_space (gen);
16736         }
16737     }
16738     descr_generations (TRUE);
16739 //    descr_card_table();
16740
16741 #ifdef VERIFY_HEAP
16742     if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
16743        !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY))
16744     {
16745         verify_heap (TRUE);
16746     }
16747     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)
16748         checkGCWriteBarrier();
16749
16750 #endif // VERIFY_HEAP
16751
16752 #ifdef BACKGROUND_GC
16753     if (settings.concurrent)
16754     {
16755         // We need to save the settings because we'll need to restore them after each FGC.
16756         assert (settings.condemned_generation == max_generation);
16757         settings.compaction = FALSE;
16758         saved_bgc_settings = settings;
16759
16760 #ifdef MULTIPLE_HEAPS
16761         if (heap_number == 0)
16762         {
16763             for (int i = 0; i < n_heaps; i++)
16764             {
16765                 prepare_bgc_thread (g_heaps[i]);
16766             }
16767             dprintf (2, ("setting bgc_threads_sync_event"));
16768             bgc_threads_sync_event.Set();
16769         }
16770         else
16771         {
16772             bgc_threads_sync_event.Wait(INFINITE, FALSE);
16773             dprintf (2, ("bgc_threads_sync_event is signalled"));
16774         }
16775 #else
16776         prepare_bgc_thread(0);
16777 #endif //MULTIPLE_HEAPS
16778
16779 #ifdef MULTIPLE_HEAPS
16780         gc_t_join.join(this, gc_join_start_bgc);
16781         if (gc_t_join.joined())
16782 #endif //MULTIPLE_HEAPS
16783         {
16784             do_concurrent_p = TRUE;
16785             do_ephemeral_gc_p = FALSE;
16786 #ifdef MULTIPLE_HEAPS
16787             dprintf(2, ("Joined to perform a background GC"));
16788
16789             for (int i = 0; i < n_heaps; i++)
16790             {
16791                 gc_heap* hp = g_heaps[i];
16792                 if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init (hp->mark_array))
16793                 {
16794                     do_concurrent_p = FALSE;
16795                     break;
16796                 }
16797                 else
16798                 {
16799                     hp->background_saved_lowest_address = hp->lowest_address;
16800                     hp->background_saved_highest_address = hp->highest_address;
16801                 }
16802             }
16803 #else
16804             do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init (mark_array));
16805             if (do_concurrent_p)
16806             {
16807                 background_saved_lowest_address = lowest_address;
16808                 background_saved_highest_address = highest_address;
16809             }
16810 #endif //MULTIPLE_HEAPS
16811
16812             if (do_concurrent_p)
16813             {
16814 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
16815                 SoftwareWriteWatch::EnableForGCHeap();
16816 #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
16817
16818 #ifdef MULTIPLE_HEAPS
16819                 for (int i = 0; i < n_heaps; i++)
16820                     g_heaps[i]->current_bgc_state = bgc_initialized;
16821 #else
16822                 current_bgc_state = bgc_initialized;
16823 #endif //MULTIPLE_HEAPS
16824
16825                 int gen = check_for_ephemeral_alloc();
16826                 // always do a gen1 GC before we start BGC.
16827                 // This is temporary, for testing purposes.
16828                 //int gen = max_generation - 1;
16829                 dont_restart_ee_p = TRUE;
16830                 if (gen == -1)
16831                 {
16832                     // If we decide to not do a GC before the BGC we need to 
16833                     // restore the gen0 alloc context.
16834 #ifdef MULTIPLE_HEAPS
16835                     for (int i = 0; i < n_heaps; i++)
16836                     {
16837                         generation_allocation_pointer (g_heaps[i]->generation_of (0)) =  0;
16838                         generation_allocation_limit (g_heaps[i]->generation_of (0)) = 0;
16839                     }
16840 #else
16841                     generation_allocation_pointer (youngest_generation) =  0;
16842                     generation_allocation_limit (youngest_generation) = 0;
16843 #endif //MULTIPLE_HEAPS
16844                 }
16845                 else
16846                 {
16847                     do_ephemeral_gc_p = TRUE;
16848
16849                     settings.init_mechanisms();
16850                     settings.condemned_generation = gen;
16851                     settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2;
16852                     do_pre_gc();
16853
16854                     // TODO BACKGROUND_GC need to add the profiling stuff here.
16855                     dprintf (GTC_LOG, ("doing gen%d before doing a bgc", gen));
16856                 }
16857
16858                 //clear the cards so they don't bleed in gen 1 during collection
16859                 // shouldn't this always be done at the beginning of any GC?
16860                 //clear_card_for_addresses (
16861                 //    generation_allocation_start (generation_of (0)),
16862                 //    heap_segment_allocated (ephemeral_heap_segment));
16863
16864                 if (!do_ephemeral_gc_p)
16865                 {
16866                     do_background_gc();
16867                 }
16868             }
16869             else
16870             {
16871                 settings.compaction = TRUE;
16872                 c_write (settings.concurrent, FALSE);
16873             }
16874
16875 #ifdef MULTIPLE_HEAPS
16876             gc_t_join.restart();
16877 #endif //MULTIPLE_HEAPS
16878         }
16879
16880         if (do_concurrent_p)
16881         {
16882             // At this point we are sure we'll be starting a BGC, so save its per heap data here.
16883             // The global data is only calculated at the end of the GC, so we don't need to worry about
16884             // FGCs overwriting it.
16885             memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap));
16886             memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));
16887
16888             if (do_ephemeral_gc_p)
16889             {
16890                 dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation));
16891
16892                 gen_to_condemn_reasons.init();
16893                 gen_to_condemn_reasons.set_condition (gen_before_bgc);
16894                 gc_data_per_heap.gen_to_condemn_reasons.init (&gen_to_condemn_reasons);
16895                 gc1();
16896 #ifdef MULTIPLE_HEAPS
16897                 gc_t_join.join(this, gc_join_bgc_after_ephemeral);
16898                 if (gc_t_join.joined())
16899 #endif //MULTIPLE_HEAPS
16900                 {
16901 #ifdef MULTIPLE_HEAPS
16902                     do_post_gc();
16903 #endif //MULTIPLE_HEAPS
16904                     settings = saved_bgc_settings;
16905                     assert (settings.concurrent);
16906
16907                     do_background_gc();
16908
16909 #ifdef MULTIPLE_HEAPS
16910                     gc_t_join.restart();
16911 #endif //MULTIPLE_HEAPS
16912                 }
16913             }
16914         }
16915         else
16916         {
16917             dprintf (2, ("couldn't create BGC threads, reverting to doing a blocking GC"));
16918             gc1();
16919         }
16920     }
16921     else
16922 #endif //BACKGROUND_GC
16923     {
16924         gc1();
16925     }
16926 #ifndef MULTIPLE_HEAPS
16927     allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
16928     allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
16929     fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
16930 #endif //MULTIPLE_HEAPS
16931
16932 done:
16933     if (settings.pause_mode == pause_no_gc)
16934         allocate_for_no_gc_after_gc();
16935
16936     int gn = settings.condemned_generation;
16937     return gn;
16938 }
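// Note: the generation returned above is the one that was actually condemned, which can differ
// from the requested n: joined_generation_to_condemn may adjust it, and a gen2 request is
// demoted to gen1 if a background GC is already in progress.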
16939
16940 #define mark_stack_empty_p() (mark_stack_base == mark_stack_tos)
16941
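// Note: with MULTIPLE_HEAPS the per-heap promoted byte counts are kept in g_promoted spaced
// 16 pointers apart (thread*16), presumably so that each heap's counter sits on its own cache
// line and concurrent updates from different GC threads don't false-share.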
16942 inline
16943 size_t& gc_heap::promoted_bytes(int thread)
16944 {
16945 #ifdef MULTIPLE_HEAPS
16946     return g_promoted [thread*16];
16947 #else //MULTIPLE_HEAPS
16948     UNREFERENCED_PARAMETER(thread);
16949     return g_promoted;
16950 #endif //MULTIPLE_HEAPS
16951 }
16952
16953 #ifdef INTERIOR_POINTERS
16954 heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
16955 {
16956 #ifdef SEG_MAPPING_TABLE
16957     heap_segment* seg = seg_mapping_table_segment_of (interior);
16958     if (seg)
16959     {
16960         if (small_segment_only_p && heap_segment_loh_p (seg))
16961             return 0;
16962     }
16963     return seg;
16964 #else //SEG_MAPPING_TABLE
16965     heap_segment* hs = 0;
16966 #ifdef MULTIPLE_HEAPS
16967     for (int i = 0; i < gc_heap::n_heaps; i++)
16968     {
16969         gc_heap* h = gc_heap::g_heaps [i];
16970         hs = h->find_segment_per_heap (interior, small_segment_only_p);
16971         if (hs)
16972             break;
16973     }
16974 #else
16975     {
16976         gc_heap* h = pGenGCHeap;
16977         hs = h->find_segment_per_heap (interior, small_segment_only_p);
16978     }
16979 #endif //MULTIPLE_HEAPS
16980     return hs;
16981 #endif //SEG_MAPPING_TABLE
16982 }
16983
16984 heap_segment* gc_heap::find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p)
16985 {
16986 #ifdef SEG_MAPPING_TABLE
16987     return find_segment (interior, small_segment_only_p);
16988 #else //SEG_MAPPING_TABLE
16989     if (in_range_for_segment (interior, ephemeral_heap_segment))
16990     {
16991         return ephemeral_heap_segment;
16992     }
16993     else
16994     {
16995         heap_segment* found_seg = 0;
16996
16997         {
16998             heap_segment* seg = generation_start_segment (generation_of (max_generation));
16999             do
17000             {
17001                 if (in_range_for_segment (interior, seg))
17002                 {
17003                     found_seg = seg;
17004                     goto end_find_segment;
17005                 }
17006
17007             } while ((seg = heap_segment_next (seg)) != 0);
17008         }
17009         if (!small_segment_only_p)
17010         {
17011 #ifdef BACKGROUND_GC
17012             {
17013                 ptrdiff_t delta = 0;
17014                 heap_segment* seg = segment_of (interior, delta);
17015                 if (seg && in_range_for_segment (interior, seg))
17016                 {
17017                     found_seg = seg;
17018                 }
17019                 goto end_find_segment;
17020             }
17021 #else //BACKGROUND_GC
17022             heap_segment* seg = generation_start_segment (generation_of (max_generation+1));
17023             do
17024             {
17025                 if (in_range_for_segment(interior, seg))
17026                 {
17027                     found_seg = seg;
17028                     goto end_find_segment;
17029                 }
17030
17031             } while ((seg = heap_segment_next (seg)) != 0);
17032 #endif //BACKGROUND_GC
17033         }
17034 end_find_segment:
17035
17036         return found_seg;
17037     }
17038 #endif //SEG_MAPPING_TABLE
17039 }
17040 #endif //INTERIOR_POINTERS
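// find_segment / find_segment_per_heap above resolve an arbitrary (interior) address to the heap
// segment that contains it, or 0 if it is not inside any segment; with small_segment_only_p the
// lookup refuses to return a large object segment.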
17041
17042 #if !defined(_DEBUG) && !defined(__GNUC__)
17043 inline // This causes link errors if global optimization is off
17044 #endif //!_DEBUG && !__GNUC__
17045 gc_heap* gc_heap::heap_of (uint8_t* o)
17046 {
17047 #ifdef MULTIPLE_HEAPS
17048     if (o == 0)
17049         return g_heaps [0];
17050 #ifdef SEG_MAPPING_TABLE
17051     gc_heap* hp = seg_mapping_table_heap_of (o);
17052     return (hp ? hp : g_heaps[0]);
17053 #else //SEG_MAPPING_TABLE
17054     ptrdiff_t delta = 0;
17055     heap_segment* seg = segment_of (o, delta);
17056     return (seg ? heap_segment_heap (seg) : g_heaps [0]);
17057 #endif //SEG_MAPPING_TABLE
17058 #else //MULTIPLE_HEAPS
17059     UNREFERENCED_PARAMETER(o);
17060     return __this;
17061 #endif //MULTIPLE_HEAPS
17062 }
17063
17064 inline
17065 gc_heap* gc_heap::heap_of_gc (uint8_t* o)
17066 {
17067 #ifdef MULTIPLE_HEAPS
17068     if (o == 0)
17069         return g_heaps [0];
17070 #ifdef SEG_MAPPING_TABLE
17071     gc_heap* hp = seg_mapping_table_heap_of_gc (o);
17072     return (hp ? hp : g_heaps[0]);
17073 #else //SEG_MAPPING_TABLE
17074     ptrdiff_t delta = 0;
17075     heap_segment* seg = segment_of (o, delta);
17076     return (seg ? heap_segment_heap (seg) : g_heaps [0]);
17077 #endif //SEG_MAPPING_TABLE
17078 #else //MULTIPLE_HEAPS
17079     UNREFERENCED_PARAMETER(o);
17080     return __this;
17081 #endif //MULTIPLE_HEAPS
17082 }
17083
17084 #ifdef INTERIOR_POINTERS
17085 // will find all heap objects (large and small)
17086 uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
17087 {
17088     if (!gen0_bricks_cleared)
17089     {
17090 #ifdef MULTIPLE_HEAPS
17091         assert (!"Should have already been done in server GC");
17092 #endif //MULTIPLE_HEAPS
17093         gen0_bricks_cleared = TRUE;
17094         //initialize brick table for gen 0
17095         for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
17096              b < brick_of (align_on_brick
17097                            (heap_segment_allocated (ephemeral_heap_segment)));
17098              b++)
17099         {
17100             set_brick (b, -1);
17101         }
17102     }
17103 #ifdef FFIND_OBJECT
17104     //indicate that in the future this needs to be done during allocation
17105 #ifdef MULTIPLE_HEAPS
17106     gen0_must_clear_bricks = FFIND_DECAY*gc_heap::n_heaps;
17107 #else
17108     gen0_must_clear_bricks = FFIND_DECAY;
17109 #endif //MULTIPLE_HEAPS
17110 #endif //FFIND_OBJECT
17111
17112     int brick_entry = get_brick_entry(brick_of (interior));
17113     if (brick_entry == 0)
17114     {
17115         // this is a pointer to a large object
17116         heap_segment* seg = find_segment_per_heap (interior, FALSE);
17117         if (seg
17118 #ifdef FEATURE_CONSERVATIVE_GC
17119             && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
17120 #endif
17121             )
17122         {
17123             // If interior falls within the first free object at the beginning of a generation,
17124             // we don't have a brick entry for it, and we may incorrectly treat it as being on the large object heap.
17125             int align_const = get_alignment_constant (heap_segment_read_only_p (seg)
17126 #ifdef FEATURE_CONSERVATIVE_GC
17127                                                        || (GCConfig::GetConservativeGC() && !heap_segment_loh_p (seg))
17128 #endif
17129                                                       );
17130             //int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
17131             assert (interior < heap_segment_allocated (seg));
17132
17133             uint8_t* o = heap_segment_mem (seg);
17134             while (o < heap_segment_allocated (seg))
17135             {
17136                 uint8_t* next_o = o + Align (size (o), align_const);
17137                 assert (next_o > o);
17138                 if ((o <= interior) && (interior < next_o))
17139                 return o;
17140                 o = next_o;
17141             }
17142             return 0;
17143         }
17144         else
17145         {
17146             return 0;
17147         }
17148     }
17149     else if (interior >= low)
17150     {
17151         heap_segment* seg = find_segment_per_heap (interior, TRUE);
17152         if (seg)
17153         {
17154 #ifdef FEATURE_CONSERVATIVE_GC
17155             if (interior >= heap_segment_allocated (seg))
17156                 return 0;
17157 #else
17158             assert (interior < heap_segment_allocated (seg));
17159 #endif
17160             uint8_t* o = find_first_object (interior, heap_segment_mem (seg));
17161             return o;
17162         }
17163         else
17164             return 0;
17165     }
17166     else
17167         return 0;
17168 }
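// When the brick entry is 0 (the address is presumed to be on the large object heap), find_object
// above falls back to walking the segment's objects linearly from heap_segment_mem until it finds
// the object whose range covers the interior pointer. A minimal standalone sketch of that walk
// (illustrative only, not part of the build; find_containing_object and obj_size are hypothetical
// stand-ins for the real loop and for Align (size (o), align_const)):
#if 0
static uint8_t* find_containing_object (uint8_t* seg_start, uint8_t* seg_allocated,
                                         uint8_t* interior, size_t (*obj_size) (uint8_t*))
{
    uint8_t* o = seg_start;
    while (o < seg_allocated)
    {
        uint8_t* next_o = o + obj_size (o);         // start of the next object
        if ((o <= interior) && (interior < next_o))
            return o;                               // interior falls inside [o, next_o)
        o = next_o;
    }
    return 0;                                       // not inside any allocated object
}
#endif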
17169
17170 uint8_t*
17171 gc_heap::find_object_for_relocation (uint8_t* interior, uint8_t* low, uint8_t* high)
17172 {
17173     uint8_t* old_address = interior;
17174     if (!((old_address >= low) && (old_address < high)))
17175         return 0;
17176     uint8_t* plug = 0;
17177     size_t  brick = brick_of (old_address);
17178     int    brick_entry =  brick_table [ brick ];
17179     if (brick_entry != 0)
17180     {
17181     retry:
17182         {
17183             while (brick_entry < 0)
17184             {
17185                 brick = (brick + brick_entry);
17186                 brick_entry =  brick_table [ brick ];
17187             }
17188             uint8_t* old_loc = old_address;
17189             uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
17190                                       old_loc);
17191             if (node <= old_loc)
17192                 plug = node;
17193             else
17194             {
17195                 brick = brick - 1;
17196                 brick_entry =  brick_table [ brick ];
17197                 goto retry;
17198             }
17199
17200         }
17201         assert (plug);
17202         //find the object by going along the plug
17203         uint8_t* o = plug;
17204         while (o <= interior)
17205         {
17206             uint8_t* next_o = o + Align (size (o));
17207             assert (next_o > o);
17208             if (next_o > interior)
17209             {
17210                 break;
17211             }
17212             o = next_o;
17213         }
17214         assert ((o <= interior) && ((o + Align (size (o))) > interior));
17215         return o;
17216     }
17217     else
17218     {
17219         // this is a pointer to a large object
17220         heap_segment* seg = find_segment_per_heap (interior, FALSE);
17221         if (seg)
17222         {
17223             assert (interior < heap_segment_allocated (seg));
17224
17225             uint8_t* o = heap_segment_mem (seg);
17226             while (o < heap_segment_allocated (seg))
17227             {
17228                 uint8_t* next_o = o + Align (size (o));
17229                 assert (next_o > o);
17230                 if ((o < interior) && (interior < next_o))
17231                 return o;
17232                 o = next_o;
17233             }
17234             return 0;
17235         }
17236         else
17237             {
17238             return 0;
17239         }
17240     }
17241 }
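// The brick table lookups above work roughly as follows: the heap is divided into fixed-size
// bricks, and for each brick the table stores either 0 (never set; find_object treats that as a
// large-object address), a positive value (1 + the offset within the brick of the root of the
// plug tree for that brick), or a negative value meaning the covering entry is that many bricks
// back (find_object seeds gen0 bricks with -1). The while (brick_entry < 0) loop follows that
// back-chain, and tree_search then walks the plug tree down to the plug at or below the address.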
17242 #else //INTERIOR_POINTERS
17243 inline
17244 uint8_t* gc_heap::find_object (uint8_t* o, uint8_t* low)
17245 {
17246     return o;
17247 }
17248 #endif //INTERIOR_POINTERS
17249
17250 #ifdef MARK_LIST
17251 #ifdef GC_CONFIG_DRIVEN
17252 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;}
17253 #else
17254 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;}
17255 #endif //GC_CONFIG_DRIVEN
17256 #else //MARK_LIST
17257 #define m_boundary(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
17258 #endif //MARK_LIST
17259
17260 #define m_boundary_fullgc(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
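// m_boundary records each newly marked object: with MARK_LIST it appends the object to the mark
// list (mark_list_index) when there is room, and it always keeps slow/shigh updated as the lowest
// and highest marked addresses. m_boundary_fullgc skips the mark list, since (per the comment in
// mark_object_simple1 below) the mark list is not used for full GCs.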
17261
17262 #define method_table(o) ((CObjectHeader*)(o))->GetMethodTable()
17263
17264 inline
17265 BOOL gc_heap::gc_mark1 (uint8_t* o)
17266 {
17267     BOOL marked = !marked (o);
17268     set_marked (o);
17269     dprintf (3, ("*%Ix*, newly marked: %d", (size_t)o, marked));
17270     return marked;
17271 }
17272
17273 inline
17274 BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high)
17275 {
17276     BOOL marked = FALSE;
17277     if ((o >= low) && (o < high))
17278         marked = gc_mark1 (o);
17279 #ifdef MULTIPLE_HEAPS
17280     else if (o)
17281     {
17282         //find the heap
17283         gc_heap* hp = heap_of_gc (o);
17284         assert (hp);
17285         if ((o >= hp->gc_low) && (o < hp->gc_high))
17286             marked = gc_mark1 (o);
17287     }
17288 #ifdef SNOOP_STATS
17289     snoop_stat.objects_checked_count++;
17290
17291     if (marked)
17292     {
17293         snoop_stat.objects_marked_count++;
17294     }
17295     if (!o)
17296     {
17297         snoop_stat.zero_ref_count++;
17298     }
17299
17300 #endif //SNOOP_STATS
17301 #endif //MULTIPLE_HEAPS
17302     return marked;
17303 }
17304
17305 #ifdef BACKGROUND_GC
17306
17307 inline
17308 BOOL gc_heap::background_marked (uint8_t* o)
17309 {
17310     return mark_array_marked (o);
17311 }
17312 inline
17313 BOOL gc_heap::background_mark1 (uint8_t* o)
17314 {
17315     BOOL to_mark = !mark_array_marked (o);
17316
17317     dprintf (3, ("b*%Ix*b(%d)", (size_t)o, (to_mark ? 1 : 0)));
17318     if (to_mark)
17319     {
17320         mark_array_set_marked (o);
17321         dprintf (4, ("n*%Ix*n", (size_t)o));
17322         return TRUE;
17323     }
17324     else
17325         return FALSE;
17326 }
17327
17328 // TODO: we could consider filtering out NULLs here instead of going to
17329 // look for them on other heaps
17330 inline
17331 BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high)
17332 {
17333     BOOL marked = FALSE;
17334     if ((o >= low) && (o < high))
17335         marked = background_mark1 (o);
17336 #ifdef MULTIPLE_HEAPS
17337     else if (o)
17338     {
17339         //find the heap
17340         gc_heap* hp = heap_of (o);
17341         assert (hp);
17342         if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address))
17343             marked = background_mark1 (o);
17344     }
17345 #endif //MULTIPLE_HEAPS
17346     return marked;
17347 }
17348
17349 #endif //BACKGROUND_GC
17350
17351 inline
17352 uint8_t* gc_heap::next_end (heap_segment* seg, uint8_t* f)
17353 {
17354     if (seg == ephemeral_heap_segment)
17355         return  f;
17356     else
17357         return  heap_segment_allocated (seg);
17358 }
17359
17360 #define new_start() {if (ppstop <= start) {break;} else {parm = start;}}
17361 #define ignore_start 0
17362 #define use_start 1
17363
17364 #define go_through_object(mt,o,size,parm,start,start_useful,limit,exp)      \
17365 {                                                                           \
17366     CGCDesc* map = CGCDesc::GetCGCDescFromMT((MethodTable*)(mt));           \
17367     CGCDescSeries* cur = map->GetHighestSeries();                           \
17368     ptrdiff_t cnt = (ptrdiff_t) map->GetNumSeries();                        \
17369                                                                             \
17370     if (cnt >= 0)                                                           \
17371     {                                                                       \
17372         CGCDescSeries* last = map->GetLowestSeries();                       \
17373         uint8_t** parm = 0;                                                 \
17374         do                                                                  \
17375         {                                                                   \
17376             assert (parm <= (uint8_t**)((o) + cur->GetSeriesOffset()));     \
17377             parm = (uint8_t**)((o) + cur->GetSeriesOffset());               \
17378             uint8_t** ppstop =                                              \
17379                 (uint8_t**)((uint8_t*)parm + cur->GetSeriesSize() + (size));\
17380             if (!start_useful || (uint8_t*)ppstop > (start))                \
17381             {                                                               \
17382                 if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);\
17383                 while (parm < ppstop)                                       \
17384                 {                                                           \
17385                    {exp}                                                    \
17386                    parm++;                                                  \
17387                 }                                                           \
17388             }                                                               \
17389             cur--;                                                          \
17390                                                                             \
17391         } while (cur >= last);                                              \
17392     }                                                                       \
17393     else                                                                    \
17394     {                                                                       \
17395         /* Handle the repeating case - array of valuetypes */               \
17396         uint8_t** parm = (uint8_t**)((o) + cur->startoffset);               \
17397         if (start_useful && start > (uint8_t*)parm)                         \
17398         {                                                                   \
17399             ptrdiff_t cs = mt->RawGetComponentSize();                         \
17400             parm = (uint8_t**)((uint8_t*)parm + (((start) - (uint8_t*)parm)/cs)*cs); \
17401         }                                                                   \
17402         while ((uint8_t*)parm < ((o)+(size)-plug_skew))                     \
17403         {                                                                   \
17404             for (ptrdiff_t __i = 0; __i > cnt; __i--)                         \
17405             {                                                               \
17406                 HALF_SIZE_T skip =  cur->val_serie[__i].skip;               \
17407                 HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs;              \
17408                 uint8_t** ppstop = parm + nptrs;                            \
17409                 if (!start_useful || (uint8_t*)ppstop > (start))            \
17410                 {                                                           \
17411                     if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);      \
17412                     do                                                      \
17413                     {                                                       \
17414                        {exp}                                                \
17415                        parm++;                                              \
17416                     } while (parm < ppstop);                                \
17417                 }                                                           \
17418                 parm = (uint8_t**)((uint8_t*)ppstop + skip);                \
17419             }                                                               \
17420         }                                                                   \
17421     }                                                                       \
17422 }
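// go_through_object walks every object-reference slot of object o of the given size, executing
// exp with parm pointing at each slot. It reads the CGCDesc series stored in front of the
// MethodTable: a non-negative series count is a list of (offset, size) runs of references, while
// a negative count denotes the repeating val_serie form used for arrays of value types (nptrs
// references followed by skip bytes, per element). start/start_useful allow resuming a scan
// partway through the object, which is what the partial-mark code below relies on.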
17423
17424 #define go_through_object_nostart(mt,o,size,parm,exp) {go_through_object(mt,o,size,parm,o,ignore_start,(o + size),exp); }
17425
17426 // One thing to note about this macro:
17427 // you can use *parm safely, but in general you don't want to use parm itself
17428 // because for collectible types it's not an address on the managed heap.
17429 #ifndef COLLECTIBLE_CLASS
17430 #define go_through_object_cl(mt,o,size,parm,exp)                            \
17431 {                                                                           \
17432     if (header(o)->ContainsPointers())                                      \
17433     {                                                                       \
17434         go_through_object_nostart(mt,o,size,parm,exp);                      \
17435     }                                                                       \
17436 }
17437 #else //COLLECTIBLE_CLASS
17438 #define go_through_object_cl(mt,o,size,parm,exp)                            \
17439 {                                                                           \
17440     if (header(o)->Collectible())                                           \
17441     {                                                                       \
17442         uint8_t* class_obj = get_class_object (o);                             \
17443         uint8_t** parm = &class_obj;                                           \
17444         do {exp} while (false);                                             \
17445     }                                                                       \
17446     if (header(o)->ContainsPointers())                                      \
17447     {                                                                       \
17448         go_through_object_nostart(mt,o,size,parm,exp);                      \
17449     }                                                                       \
17450 }
17451 #endif //COLLECTIBLE_CLASS
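// go_through_object_cl is the common wrapper used by the mark code: it only walks the instance
// fields if the object actually contains pointers and, when COLLECTIBLE_CLASS is defined, it
// first visits the collectible class object (get_class_object) so that a collectible type's
// loader allocator is kept alive by its instances.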
17452
17453 // This starts a plug. But mark_stack_tos isn't increased until set_pinned_info is called.
17454 void gc_heap::enque_pinned_plug (uint8_t* plug,
17455                                  BOOL save_pre_plug_info_p, 
17456                                  uint8_t* last_object_in_last_plug)
17457 {
17458     if (mark_stack_array_length <= mark_stack_tos)
17459     {
17460         if (!grow_mark_stack (mark_stack_array, mark_stack_array_length, MARK_STACK_INITIAL_LENGTH))
17461         {
17462             // we don't want to continue here due to security
17463             // risks. This happens very rarely and fixing it in the
17464             // way so that we can continue is a bit involved and will
17465             // not be done in Dev10.
17466             GCToEEInterface::HandleFatalError(CORINFO_EXCEPTION_GC);
17467         }
17468     }
17469
17470     dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d", 
17471         mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0)));
17472     mark& m = mark_stack_array[mark_stack_tos];
17473     m.first = plug;
17474     // Must be set now because if we have a short object we'll need the value of saved_pre_p.
17475     m.saved_pre_p = save_pre_plug_info_p;
17476
17477     if (save_pre_plug_info_p)
17478     {
17479 #ifdef SHORT_PLUGS
17480         BOOL is_padded = is_plug_padded (last_object_in_last_plug);
17481         if (is_padded)
17482             clear_plug_padded (last_object_in_last_plug);
17483 #endif //SHORT_PLUGS
17484         memcpy (&(m.saved_pre_plug), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
17485 #ifdef SHORT_PLUGS
17486         if (is_padded)
17487             set_plug_padded (last_object_in_last_plug);
17488 #endif //SHORT_PLUGS
17489
17490         memcpy (&(m.saved_pre_plug_reloc), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
17491
17492         // If the last object in the last plug is too short, it requires special handling.
17493         size_t last_obj_size = plug - last_object_in_last_plug;
17494         if (last_obj_size < min_pre_pin_obj_size)
17495         {
17496             record_interesting_data_point (idp_pre_short);
17497 #ifdef SHORT_PLUGS
17498             if (is_padded)
17499                 record_interesting_data_point (idp_pre_short_padded);
17500 #endif //SHORT_PLUGS
17501             dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!", 
17502                          last_object_in_last_plug, plug));
17503             // Need to set the short bit regardless of having refs or not because we need to 
17504             // indicate that this object is not walkable.
17505             m.set_pre_short();
17506
17507 #ifdef COLLECTIBLE_CLASS
17508             if (is_collectible (last_object_in_last_plug))
17509             {
17510                 m.set_pre_short_collectible();
17511             }
17512 #endif //COLLECTIBLE_CLASS
17513
17514             if (contain_pointers (last_object_in_last_plug))
17515             {
17516                 dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));
17517
17518                 go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
17519                     {
17520                         size_t gap_offset = (((size_t)pval - (size_t)(plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
17521                         dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
17522                         m.set_pre_short_bit (gap_offset);
17523                     }
17524                 );
17525             }
17526         }
17527     }
17528
17529     m.saved_post_p = FALSE;
17530 }
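// Background for enque_pinned_plug / save_post_plug_info: during plan, the gap_reloc_pair that
// describes a plug is written right before the plug, on top of the tail of the previous object.
// For a pinned plug those overwritten bytes must be restored later, so they are saved in the
// queue entry (saved_pre_plug / saved_post_plug). When the neighboring object is shorter than
// min_pre_pin_obj_size it cannot be walked once overwritten, so the "short" bits record which of
// the overwritten words were object references and therefore still need relocating.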
17531
17532 void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
17533 {
17534     UNREFERENCED_PARAMETER(last_pinned_plug);
17535
17536     mark& m = mark_stack_array[mark_stack_tos - 1];
17537     assert (last_pinned_plug == m.first);
17538     m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]);
17539
17540 #ifdef SHORT_PLUGS
17541     BOOL is_padded = is_plug_padded (last_object_in_last_plug);
17542     if (is_padded)
17543         clear_plug_padded (last_object_in_last_plug);
17544 #endif //SHORT_PLUGS
17545     memcpy (&(m.saved_post_plug), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
17546 #ifdef SHORT_PLUGS
17547     if (is_padded)
17548         set_plug_padded (last_object_in_last_plug);
17549 #endif //SHORT_PLUGS
17550
17551     memcpy (&(m.saved_post_plug_reloc), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
17552
17553     // This is important - we need to clear all bits here except the last one.
17554     m.saved_post_p = TRUE;
17555
17556 #ifdef _DEBUG
17557     m.saved_post_plug_debug.gap = 1;
17558 #endif //_DEBUG
17559
17560     dprintf (3, ("PP %Ix has NP %Ix right after", last_pinned_plug, post_plug));
17561
17562     size_t last_obj_size = post_plug - last_object_in_last_plug;
17563     if (last_obj_size < min_pre_pin_obj_size)
17564     {
17565         dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug));
17566         record_interesting_data_point (idp_post_short);
17567 #ifdef SHORT_PLUGS
17568         if (is_padded)
17569             record_interesting_data_point (idp_post_short_padded);
17570 #endif //SHORT_PLUGS
17571         m.set_post_short();
17572         verify_pinned_queue_p = TRUE;
17573
17574 #ifdef COLLECTIBLE_CLASS
17575         if (is_collectible (last_object_in_last_plug))
17576         {
17577             m.set_post_short_collectible();
17578         }
17579 #endif //COLLECTIBLE_CLASS
17580
17581         if (contain_pointers (last_object_in_last_plug))
17582         {
17583             dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));
17584
17585             // TODO: since we won't be able to walk this object in relocation, we still need to
17586             // take care of collectible assemblies here.
17587             go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
17588                 {
17589                     size_t gap_offset = (((size_t)pval - (size_t)(post_plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
17590                     dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
17591                     m.set_post_short_bit (gap_offset);
17592                 }
17593             );
17594         }
17595     }
17596 }
17597
17598 //#define PREFETCH
17599 #ifdef PREFETCH
17600 __declspec(naked) void __fastcall Prefetch(void* addr)
17601 {
17602    __asm {
17603        PREFETCHT0 [ECX]
17604         ret
17605     };
17606 }
17607 #else //PREFETCH
17608 inline void Prefetch (void* addr)
17609 {
17610     UNREFERENCED_PARAMETER(addr);
17611 }
17612 #endif //PREFETCH
17613 #ifdef MH_SC_MARK
17614 inline
17615 VOLATILE(uint8_t*)& gc_heap::ref_mark_stack (gc_heap* hp, int index)
17616 {
17617     return ((VOLATILE(uint8_t*)*)(hp->mark_stack_array))[index];
17618 }
17619
17620 #endif //MH_SC_MARK
17621
17622 #define stolen 2
17623 #define partial 1
17624 #define partial_object 3
17625 inline 
17626 uint8_t* ref_from_slot (uint8_t* r)
17627 {
17628     return (uint8_t*)((size_t)r & ~(stolen | partial));
17629 }
17630 inline
17631 BOOL stolen_p (uint8_t* r)
17632 {
17633     return (((size_t)r&2) && !((size_t)r&1));
17634 }
17635 inline 
17636 BOOL ready_p (uint8_t* r)
17637 {
17638     return ((size_t)r != 1);
17639 }
17640 inline
17641 BOOL partial_p (uint8_t* r)
17642 {
17643     return (((size_t)r&1) && !((size_t)r&2));
17644 }
17645 inline 
17646 BOOL straight_ref_p (uint8_t* r)
17647 {
17648     return (!stolen_p (r) && !partial_p (r));
17649 }
17650 inline 
17651 BOOL partial_object_p (uint8_t* r)
17652 {
17653     return (((size_t)r & partial_object) == partial_object);
17654 }
17655 inline
17656 BOOL ref_p (uint8_t* r)
17657 {
17658     return (straight_ref_p (r) || partial_object_p (r));
17659 }
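// The helpers above implement the tagging protocol for mark stack slots used by mark stealing:
// a slot whose low two bits are clear is a plain object reference (straight_ref_p); bit 0 alone
// (partial) roughly tags a resume point inside a partially scanned object; bit 1 alone (stolen)
// marks an entry another heap has taken; both bits (partial_object) tag the parent object of a
// partial scan; and ready_p treats the raw value 1 as a slot that is not ready to be examined
// yet. ref_from_slot strips the two tag bits. A minimal illustration of the encoding, using the
// helpers above (not compiled into the build):
#if 0
#include <cassert>
int main ()
{
    alignas (8) static uint8_t some_object [16];
    uint8_t* o = some_object;

    uint8_t* partial_slot = (uint8_t*)((size_t)o | partial);        // resume point in o
    uint8_t* parent_slot  = (uint8_t*)((size_t)o | partial_object); // parent of a partial scan
    uint8_t* stolen_slot  = (uint8_t*)stolen;                       // taken by another heap

    assert (straight_ref_p (o));
    assert (partial_p (partial_slot) && (ref_from_slot (partial_slot) == o));
    assert (partial_object_p (parent_slot) && (ref_from_slot (parent_slot) == o));
    assert (stolen_p (stolen_slot));
    return 0;
}
#endif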
17660
17661 void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL)
17662 {
17663     SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_tos = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)mark_stack_array;
17664     SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_limit = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)&mark_stack_array[mark_stack_array_length];
17665     SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_base = mark_stack_tos;
17666 #ifdef SORT_MARK_STACK
17667     SERVER_SC_MARK_VOLATILE(uint8_t*)* sorted_tos = mark_stack_base;
17668 #endif //SORT_MARK_STACK
17669
17670     // If we are doing a full GC we don't use the mark list anyway, so use m_boundary_fullgc, which doesn't
17671     // update the mark list.
17672     BOOL  full_p = (settings.condemned_generation == max_generation);
17673
17674     assert ((start >= oo) && (start < oo+size(oo)));
17675
17676 #ifndef MH_SC_MARK
17677     *mark_stack_tos = oo;
17678 #endif //!MH_SC_MARK
17679
17680     while (1)
17681     {
17682 #ifdef MULTIPLE_HEAPS
17683 #else  //MULTIPLE_HEAPS
17684         const int thread = 0;
17685 #endif //MULTIPLE_HEAPS
17686
17687         if (oo && ((size_t)oo != 4))
17688         {
17689             size_t s = 0; 
17690             if (stolen_p (oo))
17691             {
17692                 --mark_stack_tos;
17693                 goto next_level;
17694             }
17695             else if (!partial_p (oo) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
17696             {
17697                 BOOL overflow_p = FALSE;
17698
17699                 if (mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit  - 1))
17700                 {
17701                     size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
17702                     if (mark_stack_tos + CGCDesc::GetNumPointers(method_table(oo), s, num_components) >= (mark_stack_limit - 1))
17703                     {
17704                         overflow_p = TRUE;
17705                     }
17706                 }
17707                 
17708                 if (overflow_p == FALSE)
17709                 {
17710                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
17711
17712                     go_through_object_cl (method_table(oo), oo, s, ppslot,
17713                                           {
17714                                               uint8_t* o = *ppslot;
17715                                               Prefetch(o);
17716                                               if (gc_mark (o, gc_low, gc_high))
17717                                               {
17718                                                   if (full_p)
17719                                                   {
17720                                                       m_boundary_fullgc (o);
17721                                                   }
17722                                                   else
17723                                                   {
17724                                                       m_boundary (o);
17725                                                   }
17726                                                   size_t obj_size = size (o);
17727                                                   promoted_bytes (thread) += obj_size;
17728                                                   if (contain_pointers_or_collectible (o))
17729                                                   {
17730                                                       *(mark_stack_tos++) = o;
17731                                                   }
17732                                               }
17733                                           }
17734                         );
17735                 }
17736                 else
17737                 {
17738                     dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
17739                     min_overflow_address = min (min_overflow_address, oo);
17740                     max_overflow_address = max (max_overflow_address, oo);
17741                 }
17742             }
17743             else
17744             {
17745                 if (partial_p (oo))
17746                 {
17747                     start = ref_from_slot (oo);
17748                     oo = ref_from_slot (*(--mark_stack_tos));
17749                     dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
17750                     assert ((oo < start) && (start < (oo + size (oo))));
17751                 }
17752 #ifdef COLLECTIBLE_CLASS
17753                 else
17754                 {
17755                     // If there's a class object, push it now. We are guaranteed to have the slot since
17756                     // we just popped one object off.
17757                     if (is_collectible (oo))
17758                     {
17759                         uint8_t* class_obj = get_class_object (oo);
17760                         if (gc_mark (class_obj, gc_low, gc_high))
17761                         {
17762                             if (full_p)
17763                             {
17764                                 m_boundary_fullgc (class_obj);
17765                             }
17766                             else
17767                             {
17768                                 m_boundary (class_obj);
17769                             }
17770
17771                             size_t obj_size = size (class_obj);
17772                             promoted_bytes (thread) += obj_size;
17773                             *(mark_stack_tos++) = class_obj;
17774                             // The code below expects that the oo is still stored in the stack slot that was
17775                             // just popped and it "pushes" it back just by incrementing the mark_stack_tos. 
17776                             // But the class_obj has just overwritten that stack slot and so the oo needs to
17777                             // be stored to the new slot that's pointed to by the mark_stack_tos.
17778                             *mark_stack_tos = oo;
17779                         }
17780                     }
17781                 }
17782 #endif //COLLECTIBLE_CLASS
17783
17784                 s = size (oo);
17785                 
17786                 BOOL overflow_p = FALSE;
17787             
17788                 if (mark_stack_tos + (num_partial_refs + 2)  >= mark_stack_limit)
17789                 {
17790                     overflow_p = TRUE;
17791                 }
17792                 if (overflow_p == FALSE)
17793                 {
17794                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
17795
17796                     //push the object and its current 
17797                     SERVER_SC_MARK_VOLATILE(uint8_t*)* place = ++mark_stack_tos;
17798                     mark_stack_tos++;
17799 #ifdef MH_SC_MARK
17800                     *(place-1) = 0;
17801                     *(place) = (uint8_t*)partial;
17802 #endif //MH_SC_MARK
17803                     int i = num_partial_refs; 
17804                     uint8_t* ref_to_continue = 0;
17805
17806                     go_through_object (method_table(oo), oo, s, ppslot,
17807                                        start, use_start, (oo + s),
17808                                        {
17809                                            uint8_t* o = *ppslot;
17810                                            Prefetch(o);
17811                                            if (gc_mark (o, gc_low, gc_high))
17812                                            {
17813                                                 if (full_p)
17814                                                 {
17815                                                     m_boundary_fullgc (o);
17816                                                 }
17817                                                 else
17818                                                 {
17819                                                     m_boundary (o);
17820                                                 }
17821                                                 size_t obj_size = size (o);
17822                                                 promoted_bytes (thread) += obj_size;
17823                                                 if (contain_pointers_or_collectible (o))
17824                                                 {
17825                                                     *(mark_stack_tos++) = o;
17826                                                     if (--i == 0)
17827                                                     {
17828                                                         ref_to_continue = (uint8_t*)((size_t)(ppslot+1) | partial);
17829                                                         goto more_to_do;
17830                                                     }
17831
17832                                                 }
17833                                            }
17834
17835                                        }
17836                         );
17837                     //we are finished with this object
17838                     assert (ref_to_continue == 0);
17839 #ifdef MH_SC_MARK
17840                     assert ((*(place-1)) == (uint8_t*)0);
17841 #else //MH_SC_MARK
17842                     *(place-1) = 0;
17843 #endif //MH_SC_MARK
17844                     *place = 0; 
17845                     // shouldn't we decrease tos by 2 here??
17846
17847 more_to_do:
17848                     if (ref_to_continue)
17849                     {
17850                         //update the start
17851 #ifdef MH_SC_MARK
17852                         assert ((*(place-1)) == (uint8_t*)0);
17853                         *(place-1) = (uint8_t*)((size_t)oo | partial_object);
17854                         assert (((*place) == (uint8_t*)1) || ((*place) == (uint8_t*)2));
17855 #endif //MH_SC_MARK
17856                         *place = ref_to_continue;
17857                     }
17858                 }
17859                 else
17860                 {
17861                     dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
17862                     min_overflow_address = min (min_overflow_address, oo);
17863                     max_overflow_address = max (max_overflow_address, oo);
17864                 }
17865             }
17866 #ifdef SORT_MARK_STACK
17867             if (mark_stack_tos > sorted_tos + mark_stack_array_length/8)
17868             {
17869                 rqsort1 (sorted_tos, mark_stack_tos-1);
17870                 sorted_tos = mark_stack_tos-1;
17871             }
17872 #endif //SORT_MARK_STACK
17873         }
17874     next_level:
17875         if (!(mark_stack_empty_p()))
17876         {
17877             oo = *(--mark_stack_tos);
17878             start = oo;
17879
17880 #ifdef SORT_MARK_STACK
17881             sorted_tos = min ((size_t)sorted_tos, (size_t)mark_stack_tos);
17882 #endif //SORT_MARK_STACK
17883         }
17884         else
17885             break;
17886     }
17887 }
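// Summary of mark_object_simple1: it performs the depth-first marking using the per-heap mark
// stack. Objects smaller than partial_size_th pointers are scanned completely and their newly
// marked references pushed; larger objects are scanned num_partial_refs references at a time,
// leaving a (parent, resume-point) pair on the stack so scanning can continue later and, with
// MH_SC_MARK, so idle heaps can steal either the children or the remainder. If the stack would
// overflow, the object is recorded via min/max_overflow_address and rescanned later instead.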
17888
17889 #ifdef MH_SC_MARK
17890 BOOL same_numa_node_p (int hn1, int hn2)
17891 {
17892     return (heap_select::find_numa_node_from_heap_no (hn1) == heap_select::find_numa_node_from_heap_no (hn2));
17893 }
17894
17895 int find_next_buddy_heap (int this_heap_number, int current_buddy, int n_heaps)
17896 {
17897     int hn = (current_buddy+1)%n_heaps;
17898     while (hn != current_buddy)
17899     {
17900         if ((this_heap_number != hn) && (same_numa_node_p (this_heap_number, hn)))
17901             return hn;
17902         hn = (hn+1)%n_heaps;
17903     }
17904     return current_buddy;
17905 }
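// find_next_buddy_heap picks the next heap to snoop: it walks the heap numbers after
// current_buddy and returns the first one that is on the same NUMA node as this heap (so mark
// stealing stays NUMA-local), falling back to current_buddy if no such heap exists.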
17906
17907 void 
17908 gc_heap::mark_steal()
17909 {
17910     mark_stack_busy() = 0;
17911     //clear the mark stack in the snooping range
17912     for (int i = 0; i < max_snoop_level; i++)
17913     {
17914         ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
17915     }
17916
17917     //pick the next heap as our buddy
17918     int thpn = find_next_buddy_heap (heap_number, heap_number, n_heaps);
17919
17920 #ifdef SNOOP_STATS
17921         dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps));
17922         uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
17923 #endif //SNOOP_STATS
17924
17925     int idle_loop_count = 0; 
17926     int first_not_ready_level = 0;
17927
17928     while (1)
17929     {
17930         gc_heap* hp = g_heaps [thpn];
17931         int level = first_not_ready_level;
17932         first_not_ready_level = 0; 
17933
17934         while (check_next_mark_stack (hp) && (level < (max_snoop_level-1)))
17935         {
17936             idle_loop_count = 0; 
17937 #ifdef SNOOP_STATS
17938             snoop_stat.busy_count++;
17939             dprintf (SNOOP_LOG, ("heap%d: looking at next heap level %d stack contents: %Ix", 
17940                                  heap_number, level, (int)((uint8_t**)(hp->mark_stack_array))[level]));
17941 #endif //SNOOP_STATS
17942
17943             uint8_t* o = ref_mark_stack (hp, level);
17944
17945             uint8_t* start = o;
17946             if (ref_p (o))
17947             {
17948                 mark_stack_busy() = 1;
17949
17950                 BOOL success = TRUE;
17951                 uint8_t* next = (ref_mark_stack (hp, level+1));
17952                 if (ref_p (next))
17953                 {
17954                     if (((size_t)o > 4) && !partial_object_p (o))
17955                     {
17956                         //this is a normal object, not a partial mark tuple
17957                         //success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), 0, o)==o);
17958                         success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), (uint8_t*)4, o)==o);
17959 #ifdef SNOOP_STATS
17960                         snoop_stat.interlocked_count++;
17961                         if (success)
17962                             snoop_stat.normal_count++;
17963 #endif //SNOOP_STATS
17964                     }
17965                     else
17966                     {
17967                         //it is a stolen entry, or beginning/ending of a partial mark
17968                         level++;
17969 #ifdef SNOOP_STATS
17970                         snoop_stat.stolen_or_pm_count++;
17971 #endif //SNOOP_STATS
17972                         success = FALSE;
17973                     }
17974                 }
17975                 else if (stolen_p (next))
17976                 {
17977                     //ignore the stolen entry and go to the next level
17978                     success = FALSE;
17979                     level+=2;
17980 #ifdef SNOOP_STATS
17981                     snoop_stat.stolen_entry_count++;
17982 #endif //SNOOP_STATS
17983                 }
17984                 else
17985                 {
17986                     assert (partial_p (next));
17987                     start = ref_from_slot (next);
17988                     //re-read the object
17989                     o = ref_from_slot (ref_mark_stack (hp, level));
17990                     if (o && start)
17991                     {
17992                         //steal the object
17993                         success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level+1), (uint8_t*)stolen, next)==next);
17994 #ifdef SNOOP_STATS
17995                         snoop_stat.interlocked_count++;
17996                         if (success)
17997                         {
17998                             snoop_stat.partial_mark_parent_count++;                    
17999                         }
18000 #endif //SNOOP_STATS
18001                     }
18002                     else
18003                     {
18004                         // stack is not ready, or o is completely different from the last time we read from this stack level.
18005                         // go up 2 levels to steal children or totally unrelated objects.
18006                         success = FALSE;
18007                         if (first_not_ready_level == 0)
18008                         {
18009                             first_not_ready_level = level;
18010                         }
18011                         level+=2;
18012 #ifdef SNOOP_STATS
18013                         snoop_stat.pm_not_ready_count++;
18014 #endif //SNOOP_STATS                        
18015                     }
18016                 }
18017                 if (success)
18018                 {
18019
18020 #ifdef SNOOP_STATS
18021                     dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms",
18022                             heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
18023                             (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
18024                     uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
18025 #endif //SNOOP_STATS
18026
18027                     mark_object_simple1 (o, start, heap_number);
18028
18029 #ifdef SNOOP_STATS
18030                     dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms",
18031                             heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
18032                             (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
18033 #endif //SNOOP_STATS
18034
18035                     mark_stack_busy() = 0;
18036
18037                     //clear the mark stack in the snooping range
18038                     for (int i = 0; i < max_snoop_level; i++)
18039                     {
18040                         if (((uint8_t**)mark_stack_array)[i] != 0)
18041                         {
18042                             ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
18043 #ifdef SNOOP_STATS
18044                             snoop_stat.stack_bottom_clear_count++;
18045 #endif //SNOOP_STATS
18046                         }
18047                     }
18048
18049                     level = 0; 
18050                 }
18051                 mark_stack_busy() = 0;
18052             }
18053             else
18054             {
18055                 //slot is either partial or stolen
18056                 level++;
18057             }
18058         }
18059         if ((first_not_ready_level != 0) && hp->mark_stack_busy())
18060         {
18061             continue;
18062         } 
18063         if (!hp->mark_stack_busy())
18064         {
18065             first_not_ready_level = 0; 
18066             idle_loop_count++;
18067
18068             if ((idle_loop_count % (6) )==1)
18069             {
18070 #ifdef SNOOP_STATS
18071                 snoop_stat.switch_to_thread_count++;
18072 #endif //SNOOP_STATS
18073                 GCToOSInterface::Sleep(1);
18074             }
18075             int free_count = 1;
18076 #ifdef SNOOP_STATS
18077             snoop_stat.stack_idle_count++;
18078             //dprintf (SNOOP_LOG, ("heap%d: counting idle threads", heap_number));
18079 #endif //SNOOP_STATS
18080             for (int hpn = (heap_number+1)%n_heaps; hpn != heap_number;)
18081             {
18082                 if (!((g_heaps [hpn])->mark_stack_busy()))
18083                 {
18084                     free_count++;
18085 #ifdef SNOOP_STATS
18086                 dprintf (SNOOP_LOG, ("heap%d: %d idle", heap_number, free_count));
18087 #endif //SNOOP_STATS
18088                 }
18089                 else if (same_numa_node_p (hpn, heap_number) || ((idle_loop_count%1000))==999)
18090                 {
18091                     thpn = hpn;
18092                     break;
18093                 }
18094                 hpn = (hpn+1)%n_heaps;
18095                 YieldProcessor();
18096             }
18097             if (free_count == n_heaps)
18098             {
18099                 break;
18100             }
18101         }
18102     }
18103 }
18104
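// Returns TRUE when the buddy heap's mark stack is busy, i.e. it may have entries worth snooping.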
18105 inline
18106 BOOL gc_heap::check_next_mark_stack (gc_heap* next_heap)
18107 {
18108 #ifdef SNOOP_STATS
18109     snoop_stat.check_level_count++;
18110 #endif //SNOOP_STATS
18111     return (next_heap->mark_stack_busy()>=1);
18112 }
18113 #endif //MH_SC_MARK
18114
18115 #ifdef SNOOP_STATS
18116 void gc_heap::print_snoop_stat()
18117 {
18118     dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", 
18119         "heap", "check", "zero", "mark", "stole", "pstack", "nstack", "nonsk"));
18120     dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d",
18121         snoop_stat.heap_index,
18122         snoop_stat.objects_checked_count,
18123         snoop_stat.zero_ref_count,
18124         snoop_stat.objects_marked_count,
18125         snoop_stat.stolen_stack_count,
18126         snoop_stat.partial_stack_count,
18127         snoop_stat.normal_stack_count,
18128         snoop_stat.non_stack_count));
18129     dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s", 
18130         "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "clear"));
18131     dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
18132         snoop_stat.heap_index,
18133         snoop_stat.check_level_count,
18134         snoop_stat.busy_count,
18135         snoop_stat.interlocked_count,
18136         snoop_stat.partial_mark_parent_count,
18137         snoop_stat.stolen_or_pm_count,
18138         snoop_stat.stolen_entry_count,
18139         snoop_stat.pm_not_ready_count,
18140         snoop_stat.normal_count,
18141         snoop_stat.stack_bottom_clear_count));
18142
18143     printf ("\n%4s | %8s | %8s | %8s | %8s | %8s\n", 
18144         "heap", "check", "zero", "mark", "idle", "switch");
18145     printf ("%4d | %8d | %8d | %8d | %8d | %8d\n",
18146         snoop_stat.heap_index,
18147         snoop_stat.objects_checked_count,
18148         snoop_stat.zero_ref_count,
18149         snoop_stat.objects_marked_count,
18150         snoop_stat.stack_idle_count,
18151         snoop_stat.switch_to_thread_count);
18152     printf ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", 
18153         "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
18154     printf ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
18155         snoop_stat.heap_index,
18156         snoop_stat.check_level_count,
18157         snoop_stat.busy_count,
18158         snoop_stat.interlocked_count,
18159         snoop_stat.partial_mark_parent_count,
18160         snoop_stat.stolen_or_pm_count,
18161         snoop_stat.stolen_entry_count,
18162         snoop_stat.pm_not_ready_count,
18163         snoop_stat.normal_count,
18164         snoop_stat.stack_bottom_clear_count);
18165 }
18166 #endif //SNOOP_STATS
18167
18168 #ifdef HEAP_ANALYZE
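// Heap-analyze variant of mark_object_simple: records the object containing each root in
// internal_root_array (growing the array on demand, and giving up if memory is tight)
// before performing the regular simple mark.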
18169 void
18170 gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
18171 {
18172     if (!internal_root_array)
18173     {
18174         internal_root_array = new (nothrow) uint8_t* [internal_root_array_length];
18175         if (!internal_root_array)
18176         {
18177             heap_analyze_success = FALSE;
18178         }
18179     }
18180
18181     if (heap_analyze_success && (internal_root_array_length <= internal_root_array_index))
18182     {
18183         size_t new_size = 2*internal_root_array_length;
18184
18185         uint64_t available_physical = 0;
18186         get_memory_info (NULL, &available_physical);
18187         if (new_size > (size_t)(available_physical / 10))
18188         {
18189             heap_analyze_success = FALSE;
18190         }
18191         else
18192         {
18193             uint8_t** tmp = new (nothrow) uint8_t* [new_size];
18194             if (tmp)
18195             {
18196                 memcpy (tmp, internal_root_array,
18197                         internal_root_array_length*sizeof (uint8_t*));
18198                 delete[] internal_root_array;
18199                 internal_root_array = tmp;
18200                 internal_root_array_length = new_size;
18201             }
18202             else
18203             {
18204                 heap_analyze_success = FALSE;
18205             }
18206         }
18207     }
18208
18209     if (heap_analyze_success)
18210     {
18211         PREFIX_ASSUME(internal_root_array_index < internal_root_array_length);
18212
18213         uint8_t* ref = (uint8_t*)po;
18214         if (!current_obj || 
18215             !((ref >= current_obj) && (ref < (current_obj + current_obj_size))))
18216         {
18217             gc_heap* hp = gc_heap::heap_of (ref);
18218             current_obj = hp->find_object (ref, hp->lowest_address);
18219             current_obj_size = size (current_obj);
18220
18221             internal_root_array[internal_root_array_index] = current_obj;
18222             internal_root_array_index++;
18223         }
18224     }
18225
18226     mark_object_simple (po THREAD_NUMBER_ARG);
18227 }
18228 #endif //HEAP_ANALYZE
18229
18230 //this method assumes that *po is in the [low, high[ range
18231 void
18232 gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
18233 {
18234     uint8_t* o = *po;
18235 #ifdef MULTIPLE_HEAPS
18236 #else  //MULTIPLE_HEAPS
18237     const int thread = 0;
18238 #endif //MULTIPLE_HEAPS
18239     {
18240 #ifdef SNOOP_STATS
18241         snoop_stat.objects_checked_count++;
18242 #endif //SNOOP_STATS
18243
18244         if (gc_mark1 (o))
18245         {
18246             m_boundary (o);
18247             size_t s = size (o);
18248             promoted_bytes (thread) += s;
18249             {
18250                 go_through_object_cl (method_table(o), o, s, poo,
18251                                         {
18252                                             uint8_t* oo = *poo;
18253                                             if (gc_mark (oo, gc_low, gc_high))
18254                                             {
18255                                                 m_boundary (oo);
18256                                                 size_t obj_size = size (oo);
18257                                                 promoted_bytes (thread) += obj_size;
18258
18259                                                 if (contain_pointers_or_collectible (oo))
18260                                                     mark_object_simple1 (oo, oo THREAD_NUMBER_ARG);
18261                                             }
18262                                         }
18263                     );
18264             }
18265         }
18266     }
18267 }
18268
18269 inline
18270 uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
18271 {
18272     if ((o >= gc_low) && (o < gc_high))
18273         mark_object_simple (&o THREAD_NUMBER_ARG);
18274 #ifdef MULTIPLE_HEAPS
18275     else if (o)
18276     {
18277         //find the heap
18278         gc_heap* hp = heap_of (o);
18279         assert (hp);
18280         if ((o >= hp->gc_low) && (o < hp->gc_high))
18281             mark_object_simple (&o THREAD_NUMBER_ARG);
18282     }
18283 #endif //MULTIPLE_HEAPS
18284
18285     return o;
18286 }
18287
18288 #ifdef BACKGROUND_GC
18289
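// Background (concurrent) counterpart of mark_object_simple1: iteratively marks the
// transitive closure of oo using background_mark_stack_array. Objects with many references
// are scanned num_partial_refs slots at a time; the resume point is pushed as a
// (current position, parent | 1) pair, and running out of stack records the object in the
// background overflow range instead.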
18290 void gc_heap::background_mark_simple1 (uint8_t* oo THREAD_NUMBER_DCL)
18291 {
18292     uint8_t** mark_stack_limit = &background_mark_stack_array[background_mark_stack_array_length];
18293
18294 #ifdef SORT_MARK_STACK
18295     uint8_t** sorted_tos = background_mark_stack_array;
18296 #endif //SORT_MARK_STACK
18297
18298     background_mark_stack_tos = background_mark_stack_array;
18299
18300     while (1)
18301     {
18302 #ifdef MULTIPLE_HEAPS
18303 #else  //MULTIPLE_HEAPS
18304         const int thread = 0;
18305 #endif //MULTIPLE_HEAPS
18306         if (oo)
18307         {
18308             size_t s = 0; 
18309             if ((((size_t)oo & 1) == 0) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
18310             {
18311                 BOOL overflow_p = FALSE;
18312             
18313                 if (background_mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1))
18314                 {
18315                     size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18316                     size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
18317                     if (background_mark_stack_tos + num_pointers >= (mark_stack_limit - 1))
18318                     {
18319                         dprintf (2, ("h%d: %Id left, obj (mt: %Ix) %Id ptrs", 
18320                             heap_number,
18321                             (size_t)(mark_stack_limit - 1 - background_mark_stack_tos),
18322                             method_table(oo), 
18323                             num_pointers));
18324
18325                         bgc_overflow_count++;
18326                         overflow_p = TRUE;
18327                     }
18328                 }
18329             
18330                 if (overflow_p == FALSE)
18331                 {
18332                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18333
18334                     go_through_object_cl (method_table(oo), oo, s, ppslot,
18335                     {
18336                         uint8_t* o = *ppslot;
18337                         Prefetch(o);
18338                         if (background_mark (o, 
18339                                              background_saved_lowest_address, 
18340                                              background_saved_highest_address))
18341                         {
18342                             //m_boundary (o);
18343                             size_t obj_size = size (o);
18344                             bpromoted_bytes (thread) += obj_size;
18345                             if (contain_pointers_or_collectible (o))
18346                             {
18347                                 *(background_mark_stack_tos++) = o;
18348
18349                             }
18350                         }
18351                     }
18352                         );
18353                 }
18354                 else
18355                 {
18356                     dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
18357                     background_min_overflow_address = min (background_min_overflow_address, oo);
18358                     background_max_overflow_address = max (background_max_overflow_address, oo);
18359                 }
18360             }
18361             else 
18362             {
18363                 uint8_t* start = oo;
18364                 if ((size_t)oo & 1)
18365                 {
18366                     oo = (uint8_t*)((size_t)oo & ~1);
18367                     start = *(--background_mark_stack_tos);
18368                     dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
18369                 }
18370 #ifdef COLLECTIBLE_CLASS
18371                 else
18372                 {
18373                     // If there's a class object, push it now. We are guaranteed to have the slot since
18374                     // we just popped one object off.
18375                     if (is_collectible (oo))
18376                     {
18377                         uint8_t* class_obj = get_class_object (oo);
18378                         if (background_mark (class_obj, 
18379                                             background_saved_lowest_address, 
18380                                             background_saved_highest_address))
18381                         {
18382                             size_t obj_size = size (class_obj);
18383                             bpromoted_bytes (thread) += obj_size;
18384
18385                             *(background_mark_stack_tos++) = class_obj;
18386                         }
18387                     }
18388                 }
18389 #endif //COLLECTIBLE_CLASS
18390
18391                 s = size (oo);
18392                 
18393                 BOOL overflow_p = FALSE;
18394             
18395                 if (background_mark_stack_tos + (num_partial_refs + 2)  >= mark_stack_limit)
18396                 {
18397                     size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18398                     size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
18399
18400                     dprintf (2, ("h%d: PM: %Id left, obj %Ix (mt: %Ix) start: %Ix, total: %Id", 
18401                         heap_number,
18402                         (size_t)(mark_stack_limit - background_mark_stack_tos),
18403                         oo,
18404                         method_table(oo), 
18405                         start,
18406                         num_pointers));
18407
18408                     bgc_overflow_count++;
18409                     overflow_p = TRUE;
18410                 }
18411                 if (overflow_p == FALSE)
18412                 {
18413                     dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18414
18415                     //push the object and its current scan position
18416                     uint8_t** place = background_mark_stack_tos++;
18417                     *(place) = start;
18418                     *(background_mark_stack_tos++) = (uint8_t*)((size_t)oo | 1);
18419
18420                     int i = num_partial_refs; 
18421
18422                     go_through_object (method_table(oo), oo, s, ppslot,
18423                                        start, use_start, (oo + s),
18424                     {
18425                         uint8_t* o = *ppslot;
18426                         Prefetch(o);
18427
18428                         if (background_mark (o, 
18429                                             background_saved_lowest_address, 
18430                                             background_saved_highest_address))
18431                         {
18432                             //m_boundary (o);
18433                             size_t obj_size = size (o);
18434                             bpromoted_bytes (thread) += obj_size;
18435                             if (contain_pointers_or_collectible (o))
18436                             {
18437                                 *(background_mark_stack_tos++) = o;
18438                                 if (--i == 0)
18439                                 {
18440                                     //update the start
18441                                     *place = (uint8_t*)(ppslot+1);
18442                                     goto more_to_do;
18443                                 }
18444
18445                             }
18446                         }
18447
18448                     }
18449                         );
18450                     //we are finished with this object
18451                     *place = 0; 
18452                     *(place+1) = 0;
18453
18454                 more_to_do:;
18455                 }
18456                 else
18457                 {
18458                     dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
18459                     background_min_overflow_address = min (background_min_overflow_address, oo);
18460                     background_max_overflow_address = max (background_max_overflow_address, oo);
18461                 }
18462             }
18463         }
18464 #ifdef SORT_MARK_STACK
18465         if (background_mark_stack_tos > sorted_tos + mark_stack_array_length/8)
18466         {
18467             rqsort1 (sorted_tos, background_mark_stack_tos-1);
18468             sorted_tos = background_mark_stack_tos-1;
18469         }
18470 #endif //SORT_MARK_STACK
18471
18472         allow_fgc();
18473
18474         if (!(background_mark_stack_tos == background_mark_stack_array))
18475         {
18476             oo = *(--background_mark_stack_tos);
18477
18478 #ifdef SORT_MARK_STACK
18479             sorted_tos = (uint8_t**)min ((size_t)sorted_tos, (size_t)background_mark_stack_tos);
18480 #endif //SORT_MARK_STACK
18481         }
18482         else
18483             break;
18484     }
18485
18486     assert (background_mark_stack_tos == background_mark_stack_array);
18487
18488
18489 }
18490
18491 //this version is different from the foreground GC because
18492 //it can't keep pointers to the inside of an object
18493 //while calling background_mark_simple1. The object could be moved
18494 //by an intervening foreground gc.
18495 //this method assumes that o is in the [low, high[ range
18496 void
18497 gc_heap::background_mark_simple (uint8_t* o THREAD_NUMBER_DCL)
18498 {
18499 #ifdef MULTIPLE_HEAPS
18500 #else  //MULTIPLE_HEAPS
18501     const int thread = 0;
18502 #endif //MULTIPLE_HEAPS
18503     {
18504         dprintf (3, ("bmarking %Ix", o));
18505         
18506         if (background_mark1 (o))
18507         {
18508             //m_boundary (o);
18509             size_t s = size (o);
18510             bpromoted_bytes (thread) += s;
18511
18512             if (contain_pointers_or_collectible (o))
18513             {
18514                 background_mark_simple1 (o THREAD_NUMBER_ARG);
18515             }
18516         }
18517     }
18518 }
18519
18520 inline
18521 uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL)
18522 {
18523     if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
18524     {
18525         background_mark_simple (o THREAD_NUMBER_ARG);
18526     }
18527     else
18528     {
18529         if (o)
18530         {
18531             dprintf (3, ("or-%Ix", o));
18532         }
18533     }
18534     return o;
18535 }
18536
18537 void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
18538 {
18539     UNREFERENCED_PARAMETER(sc);
18540
18541     assert (settings.concurrent);
18542     uint8_t* o = (uint8_t*)object;
18543
18544     gc_heap* hp = gc_heap::heap_of (o);
18545 #ifdef INTERIOR_POINTERS
18546     if (flags & GC_CALL_INTERIOR)
18547     {
18548         o = hp->find_object (o, background_saved_lowest_address);
18549     }
18550 #endif //INTERIOR_POINTERS
18551
18552     if (!background_object_marked (o, FALSE))
18553     {
18554         FATAL_GC_ERROR();
18555     }
18556 }
18557
18558 void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags)
18559 {
18560     UNREFERENCED_PARAMETER(sc);
18561     //in order to save space on the array, mark the object,
18562     //knowing that it will be visited later
18563     assert (settings.concurrent);
18564
18565     THREAD_NUMBER_FROM_CONTEXT;
18566 #ifndef MULTIPLE_HEAPS
18567     const int thread = 0;
18568 #endif //!MULTIPLE_HEAPS
18569
18570     uint8_t* o = (uint8_t*)*ppObject;
18571
18572     if (o == 0)
18573         return;
18574
18575 #ifdef DEBUG_DestroyedHandleValue
18576     // we can race with destroy handle during concurrent scan
18577     if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
18578         return;
18579 #endif //DEBUG_DestroyedHandleValue
18580
18581     HEAP_FROM_THREAD;
18582
18583     gc_heap* hp = gc_heap::heap_of (o);
18584
18585     if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
18586     {
18587         return;
18588     }
18589
18590 #ifdef INTERIOR_POINTERS
18591     if (flags & GC_CALL_INTERIOR)
18592     {
18593         o = hp->find_object (o, hp->background_saved_lowest_address);
18594         if (o == 0)
18595             return;
18596     }
18597 #endif //INTERIOR_POINTERS
18598
18599 #ifdef FEATURE_CONSERVATIVE_GC
18600     // For conservative GC, a value on the stack may point to the middle of a free object.
18601     // In this case, we don't need to promote the pointer.
18602     if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
18603     {
18604         return;
18605     }
18606 #endif //FEATURE_CONSERVATIVE_GC
18607
18608 #ifdef _DEBUG
18609     ((CObjectHeader*)o)->Validate();
18610 #endif //_DEBUG
18611
18612     dprintf (BGC_LOG, ("Background Promote %Ix", (size_t)o));
18613
18614     //needs to be called before the marking because it is possible for a foreground
18615     //gc to take place during the mark and move the object
18616     STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, "    GCHeap::Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
18617
18618     hpt->background_mark_simple (o THREAD_NUMBER_ARG);
18619 }
18620
18621 //used by the ephemeral collection to scan the local background structures
18622 //containing references.
18623 void
18624 gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC)
18625 {
18626     ScanContext sc;
18627     if (pSC == 0)
18628         pSC = &sc;
18629
18630     pSC->thread_number = hn;
18631
18632 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
18633     pSC->pCurrentDomain = 0;
18634 #endif
18635
18636     BOOL relocate_p = (fn == &GCHeap::Relocate);
18637
18638     dprintf (3, ("Scanning background mark list"));
18639
18640     //scan mark_list
18641     size_t mark_list_finger = 0;
18642     while (mark_list_finger < c_mark_list_index)
18643     {
18644         uint8_t** o = &c_mark_list [mark_list_finger];
18645         if (!relocate_p)
18646         {
18647             // We may not be able to calculate the size during relocate as POPO
18648             // may have written over the object.
18649             size_t s = size (*o);
18650             assert (Align (s) >= Align (min_obj_size));
18651             dprintf(3,("background root %Ix", (size_t)*o));
18652         }
18653         (*fn) ((Object**)o, pSC, 0);
18654         mark_list_finger++;
18655     }
18656
18657     //scan the mark stack
18658     dprintf (3, ("Scanning background mark stack"));
18659
18660     uint8_t** finger = background_mark_stack_array;
18661     while (finger < background_mark_stack_tos)
18662     {
18663         if ((finger + 1) < background_mark_stack_tos)
18664         {
18665             // We need to check for the partial mark case here.
18666             uint8_t* parent_obj = *(finger + 1);
18667             if ((size_t)parent_obj & 1)
18668             {
18669                 uint8_t* place = *finger;
18670                 size_t place_offset = 0;
18671                 uint8_t* real_parent_obj = (uint8_t*)((size_t)parent_obj & ~1);
18672
18673                 if (relocate_p)
18674                 {
18675                     *(finger + 1) = real_parent_obj;
18676                     place_offset = place - real_parent_obj;
18677                     dprintf(3,("relocating background root %Ix", (size_t)real_parent_obj));
18678                     (*fn) ((Object**)(finger + 1), pSC, 0);
18679                     real_parent_obj = *(finger + 1);
18680                     *finger = real_parent_obj + place_offset;
18681                     *(finger + 1) = (uint8_t*)((size_t)real_parent_obj | 1);
18682                     dprintf(3,("roots changed to %Ix, %Ix", *finger, *(finger + 1)));
18683                 }
18684                 else
18685                 {
18686                     uint8_t** temp = &real_parent_obj;
18687                     dprintf(3,("marking background root %Ix", (size_t)real_parent_obj));
18688                     (*fn) ((Object**)temp, pSC, 0);
18689                 }
18690
18691                 finger += 2;
18692                 continue;
18693             }
18694         }
18695         dprintf(3,("background root %Ix", (size_t)*finger));
18696         (*fn) ((Object**)finger, pSC, 0);
18697         finger++;
18698     }
18699 }
18700
18701 inline
18702 void gc_heap::background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL)
18703 {
18704     if (contain_pointers (oo))
18705     {
18706         size_t total_refs = 0;
18707         size_t s = size (oo);
18708         go_through_object_nostart (method_table(oo), oo, s, po,
18709                           {
18710                             uint8_t* o = *po;
18711                             total_refs++;
18712                             background_mark_object (o THREAD_NUMBER_ARG);
18713                           }
18714             );
18715
18716         dprintf (3,("Background marking through %Ix went through %Id refs", 
18717                           (size_t)oo,
18718                            total_refs));
18719     }
18720 }
18721
18722 uint8_t* gc_heap::background_seg_end (heap_segment* seg, BOOL concurrent_p)
18723 {
18724     if (concurrent_p && (seg == saved_overflow_ephemeral_seg))
18725     {
18726         // for now we stop at where gen1 started when we started processing 
18727         return background_min_soh_overflow_address;
18728     }
18729     else
18730     {
18731         return heap_segment_allocated (seg);
18732     }
18733 }
18734
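// Returns the address at or after min_add on seg from which the overflow walk should start;
// uses find_first_object so the walk begins on a real object boundary, with special handling
// for the saved ephemeral segment during the concurrent pass.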
18735 uint8_t* gc_heap::background_first_overflow (uint8_t* min_add,
18736                                           heap_segment* seg,
18737                                           BOOL concurrent_p, 
18738                                           BOOL small_object_p)
18739 {
18740     uint8_t* o = 0;
18741
18742     if (small_object_p)
18743     {
18744         if (in_range_for_segment (min_add, seg))
18745         {
18746             // min_add was the beginning of gen1 when we did the concurrent
18747             // overflow. Now we could be in a situation where min_add is
18748             // we expanded the heap), in which case we cannot call
18749             // find_first_object on this address or we will AV.
18750             // find first on this address or we will AV.
18751             if (min_add >= heap_segment_allocated (seg))
18752             {
18753                 return min_add;
18754             }
18755             else
18756             {
18757                 if (concurrent_p && 
18758                     ((seg == saved_overflow_ephemeral_seg) && (min_add >= background_min_soh_overflow_address)))
18759                 {
18760                     return background_min_soh_overflow_address;
18761                 }
18762                 else
18763                 {
18764                     o = find_first_object (min_add, heap_segment_mem (seg));
18765                     return o;
18766                 }
18767             }
18768         }
18769     }
18770
18771     o = max (heap_segment_mem (seg), min_add);
18772     return o;
18773 }
18774
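// Walks the objects between min_add and max_add on the SOH segments and then the LOH
// segments, re-marking through every object that is already background-marked. During the
// concurrent pass each heap only walks its own segments; the blocking pass walks all heaps.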
18775 void gc_heap::background_process_mark_overflow_internal (int condemned_gen_number,
18776                                                          uint8_t* min_add, uint8_t* max_add,
18777                                                          BOOL concurrent_p)
18778 {
18779     if (concurrent_p)
18780     {
18781         current_bgc_state = bgc_overflow_soh;
18782     }
18783
18784     size_t total_marked_objects = 0;
18785
18786 #ifdef MULTIPLE_HEAPS
18787     int thread = heap_number;
18788 #endif //MULTIPLE_HEAPS
18789
18790     exclusive_sync* loh_alloc_lock = 0;
18791
18792     dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
18793 #ifdef MULTIPLE_HEAPS
18794     // We don't have each heap scan all heaps concurrently because we are worried about
18795     // multiple threads calling things like find_first_object.
18796     int h_start = (concurrent_p ? heap_number : 0);
18797     int h_end = (concurrent_p ? (heap_number + 1) : n_heaps);
18798     for (int hi = h_start; hi < h_end; hi++)
18799     {
18800         gc_heap*  hp = (concurrent_p ? this : g_heaps [(heap_number + hi) % n_heaps]);
18801
18802 #else
18803     {
18804         gc_heap*  hp = 0;
18805
18806 #endif //MULTIPLE_HEAPS
18807         BOOL small_object_segments = TRUE;
18808         int align_const = get_alignment_constant (small_object_segments);
18809         generation* gen = hp->generation_of (condemned_gen_number);
18810         heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
18811         PREFIX_ASSUME(seg != NULL);
18812         loh_alloc_lock = hp->bgc_alloc_lock;
18813
18814         uint8_t* o = hp->background_first_overflow (min_add,
18815                                                     seg, 
18816                                                     concurrent_p, 
18817                                                     small_object_segments);
18818
18819         while (1)
18820         {
18821             while ((o < hp->background_seg_end (seg, concurrent_p)) && (o <= max_add))
18822             {
18823                 dprintf (3, ("considering %Ix", (size_t)o));
18824
18825                 size_t s;
18826
18827                 if (concurrent_p && !small_object_segments)
18828                 {
18829                     loh_alloc_lock->bgc_mark_set (o);
18830
18831                     if (((CObjectHeader*)o)->IsFree())
18832                     {
18833                         s = unused_array_size (o);
18834                     }
18835                     else
18836                     {
18837                         s = size (o);
18838                     }
18839                 }
18840                 else
18841                 {
18842                     s = size (o);
18843                 }
18844
18845                 if (background_object_marked (o, FALSE) && contain_pointers_or_collectible (o))
18846                 {
18847                     total_marked_objects++;
18848                     go_through_object_cl (method_table(o), o, s, poo,
18849                                           uint8_t* oo = *poo;
18850                                           background_mark_object (oo THREAD_NUMBER_ARG);
18851                                          );
18852                 }
18853
18854                 if (concurrent_p && !small_object_segments)
18855                 {
18856                     loh_alloc_lock->bgc_mark_done ();
18857                 }
18858
18859                 o = o + Align (s, align_const);
18860
18861                 if (concurrent_p)
18862                 {
18863                     allow_fgc();
18864                 }
18865             }
18866
18867             dprintf (2, ("went through overflow objects in segment %Ix (%d) (so far %Id marked)", 
18868                 heap_segment_mem (seg), (small_object_segments ? 0 : 1), total_marked_objects));
18869
18870             if ((concurrent_p && (seg == hp->saved_overflow_ephemeral_seg)) ||
18871                 (seg = heap_segment_next_in_range (seg)) == 0)
18872             {
18873                 if (small_object_segments)
18874                 {
18875                     if (concurrent_p)
18876                     {
18877                         current_bgc_state = bgc_overflow_loh;
18878                     }
18879
18880                     dprintf (2, ("h%d: SOH: ov-mo: %Id", heap_number, total_marked_objects));
18881                     fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
18882                     concurrent_print_time_delta (concurrent_p ? "Cov SOH" : "Nov SOH");
18883                     total_marked_objects = 0;
18884                     small_object_segments = FALSE;
18885                     align_const = get_alignment_constant (small_object_segments);
18886                     seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));
18887
18888                     PREFIX_ASSUME(seg != NULL);
18889
18890                     o = max (heap_segment_mem (seg), min_add);
18891                     continue;
18892                 }
18893                 else
18894                 {
18895                     dprintf (GTC_LOG, ("h%d: LOH: ov-mo: %Id", heap_number, total_marked_objects));
18896                     fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
18897                     break;
18898                 }
18899             } 
18900             else
18901             {
18902                 o = hp->background_first_overflow (min_add, 
18903                                                    seg, 
18904                                                    concurrent_p, 
18905                                                    small_object_segments);
18906                 continue;
18907             }
18908         }
18909     }
18910 }
18911
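// Processes any recorded background mark overflow range. Returns TRUE if there was overflow
// to process. Tries to grow the background mark stack first, and on the blocking
// (non-concurrent) pass also folds in the portion of the saved ephemeral segment that the
// concurrent pass skipped.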
18912 BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p)
18913 {
18914     BOOL grow_mark_array_p = TRUE;
18915
18916     if (concurrent_p)
18917     {
18918         assert (!processed_soh_overflow_p);
18919
18920         if ((background_max_overflow_address != 0) &&
18921             (background_min_overflow_address != MAX_PTR))
18922         {
18923             // We have overflow to process but we know we can't process the ephemeral generations
18924             // now (we actually could process up to the current gen1 start, but since we are going to
18925             // make overflow per segment, for now I'll just stop at the saved gen1 start).
18926             saved_overflow_ephemeral_seg = ephemeral_heap_segment;
18927             background_max_soh_overflow_address = heap_segment_reserved (saved_overflow_ephemeral_seg);
18928             background_min_soh_overflow_address = generation_allocation_start (generation_of (max_generation-1));
18929         }
18930     }
18931     else
18932     {
18933         assert ((saved_overflow_ephemeral_seg == 0) || 
18934                 ((background_max_soh_overflow_address != 0) &&
18935                  (background_min_soh_overflow_address != MAX_PTR)));
18936         
18937         if (!processed_soh_overflow_p)
18938         {
18939             // if there was no more overflow we just need to process what we didn't process 
18940             // on the saved ephemeral segment.
18941             if ((background_max_overflow_address == 0) && (background_min_overflow_address == MAX_PTR))
18942             {
18943                 dprintf (2, ("final processing mark overflow - no more overflow since last time"));
18944                 grow_mark_array_p = FALSE;
18945             }
18946
18947             background_min_overflow_address = min (background_min_overflow_address, 
18948                                                 background_min_soh_overflow_address);
18949             background_max_overflow_address = max (background_max_overflow_address,
18950                                                 background_max_soh_overflow_address);
18951             processed_soh_overflow_p = TRUE;
18952         }
18953     }
18954
18955     BOOL  overflow_p = FALSE;
18956 recheck:
18957     if ((! ((background_max_overflow_address == 0)) ||
18958          ! ((background_min_overflow_address == MAX_PTR))))
18959     {
18960         overflow_p = TRUE;
18961
18962         if (grow_mark_array_p)
18963         {
18964             // Try to grow the array.
18965             size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length);
18966
18967             if ((new_size * sizeof(mark)) > 100*1024)
18968             {
18969                 size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);
18970
18971                 new_size = min(new_max_size, new_size);
18972             }
18973
18974             if ((background_mark_stack_array_length < new_size) && 
18975                 ((new_size - background_mark_stack_array_length) > (background_mark_stack_array_length / 2)))
18976             {
18977                 dprintf (2, ("h%d: ov grow to %Id", heap_number, new_size));
18978
18979                 uint8_t** tmp = new (nothrow) uint8_t* [new_size];
18980                 if (tmp)
18981                 {
18982                     delete background_mark_stack_array;
18983                     background_mark_stack_array = tmp;
18984                     background_mark_stack_array_length = new_size;
18985                     background_mark_stack_tos = background_mark_stack_array;
18986                 }
18987             }
18988         }
18989         else
18990         {
18991             grow_mark_array_p = TRUE;
18992         }
18993
18994         uint8_t*  min_add = background_min_overflow_address;
18995         uint8_t*  max_add = background_max_overflow_address;
18996
18997         background_max_overflow_address = 0;
18998         background_min_overflow_address = MAX_PTR;
18999
19000         background_process_mark_overflow_internal (max_generation, min_add, max_add, concurrent_p);
19001         if (!concurrent_p)
19002         {        
19003             goto recheck;
19004         }
19005     }
19006
19007     return overflow_p;
19008 }
19009
19010 #endif //BACKGROUND_GC
19011
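// Marks every reference contained in oo (and, when requested for collectible classes, the
// class object itself) without re-marking oo.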
19012 inline
19013 void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL)
19014 {
19015 #ifndef COLLECTIBLE_CLASS
19016     UNREFERENCED_PARAMETER(mark_class_object_p);
19017     BOOL to_mark_class_object = FALSE;
19018 #else //COLLECTIBLE_CLASS
19019     BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo)));
19020 #endif //COLLECTIBLE_CLASS
19021     if (contain_pointers (oo) || to_mark_class_object)
19022     {
19023         dprintf(3,( "Marking through %Ix", (size_t)oo));
19024         size_t s = size (oo);
19025
19026 #ifdef COLLECTIBLE_CLASS
19027         if (to_mark_class_object)
19028         {
19029             uint8_t* class_obj = get_class_object (oo);
19030             mark_object (class_obj THREAD_NUMBER_ARG);
19031         }
19032 #endif //COLLECTIBLE_CLASS
19033
19034         if (contain_pointers (oo))
19035         {
19036             go_through_object_nostart (method_table(oo), oo, s, po,
19037                                 uint8_t* o = *po;
19038                                 mark_object (o THREAD_NUMBER_ARG);
19039                                 );
19040         }
19041     }
19042 }
19043
19044 size_t gc_heap::get_total_heap_size()
19045 {
19046     size_t total_heap_size = 0;
19047
19048 #ifdef MULTIPLE_HEAPS
19049     int hn = 0;
19050
19051     for (hn = 0; hn < gc_heap::n_heaps; hn++)
19052     {
19053         gc_heap* hp2 = gc_heap::g_heaps [hn];
19054         total_heap_size += hp2->generation_size (max_generation + 1) + hp2->generation_sizes (hp2->generation_of (max_generation));
19055     }
19056 #else
19057     total_heap_size = generation_size (max_generation + 1) + generation_sizes (generation_of (max_generation));
19058 #endif //MULTIPLE_HEAPS
19059
19060     return total_heap_size;
19061 }
19062
19063 size_t gc_heap::get_total_fragmentation()
19064 {
19065     size_t total_fragmentation = 0;
19066
19067 #ifdef MULTIPLE_HEAPS
19068     for (int i = 0; i < gc_heap::n_heaps; i++)
19069     {
19070         gc_heap* hp = gc_heap::g_heaps[i];
19071 #else //MULTIPLE_HEAPS
19072     {
19073         gc_heap* hp = pGenGCHeap;
19074 #endif //MULTIPLE_HEAPS
19075         for (int i = 0; i <= (max_generation + 1); i++)
19076         {
19077             generation* gen = hp->generation_of (i);
19078             total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen));
19079         }
19080     }
19081
19082     return total_fragmentation;
19083 }
19084
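// Committed bytes for this heap: sums (committed - segment start) over the SOH segment list
// and then the LOH segment list.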
19085 size_t gc_heap::committed_size()
19086 {
19087     generation* gen = generation_of (max_generation);
19088     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
19089     size_t total_committed = 0;
19090
19091     while (1)
19092     {
19093         total_committed += heap_segment_committed (seg) - (uint8_t*)seg;
19094
19095         seg = heap_segment_next (seg);
19096         if (!seg)
19097         {
19098             if (gen != large_object_generation)
19099             {
19100                 gen = generation_of (max_generation + 1);
19101                 seg = generation_start_segment (gen);
19102             }
19103             else
19104                 break;
19105         }
19106     }
19107
19108     return total_committed;
19109 }
19110
19111 size_t gc_heap::get_total_committed_size()
19112 {
19113     size_t total_committed = 0;
19114
19115 #ifdef MULTIPLE_HEAPS
19116     int hn = 0;
19117
19118     for (hn = 0; hn < gc_heap::n_heaps; hn++)
19119     {
19120         gc_heap* hp = gc_heap::g_heaps [hn];
19121         total_committed += hp->committed_size();
19122     }
19123 #else
19124     total_committed = committed_size();
19125 #endif //MULTIPLE_HEAPS
19126
19127     return total_committed;
19128 }
19129
19130 void gc_heap::get_memory_info (uint32_t* memory_load, 
19131                                uint64_t* available_physical,
19132                                uint64_t* available_page_file)
19133 {
19134     GCToOSInterface::GetMemoryStatus(memory_load, available_physical, available_page_file);
19135 }
19136
19137 void fire_mark_event (int heap_num, int root_type, size_t bytes_marked)
19138 {
19139     dprintf (DT_LOG_0, ("-----------[%d]mark %d: %Id", heap_num, root_type, bytes_marked));
19140     FIRE_EVENT(GCMarkWithType, heap_num, root_type, bytes_marked);
19141 }
19142
19143 //returns TRUE if an overflow happened.
19144 BOOL gc_heap::process_mark_overflow(int condemned_gen_number)
19145 {
19146     size_t last_promoted_bytes = promoted_bytes (heap_number);
19147     BOOL  overflow_p = FALSE;
19148 recheck:
19149     if ((! (max_overflow_address == 0) ||
19150          ! (min_overflow_address == MAX_PTR)))
19151     {
19152         overflow_p = TRUE;
19153         // Try to grow the array.
19154         size_t new_size =
19155             max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length);
19156
19157         if ((new_size * sizeof(mark)) > 100*1024)
19158         {
19159             size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);
19160
19161             new_size = min(new_max_size, new_size);
19162         }
19163
19164         if ((mark_stack_array_length < new_size) && 
19165             ((new_size - mark_stack_array_length) > (mark_stack_array_length / 2)))
19166         {
19167             mark* tmp = new (nothrow) mark [new_size];
19168             if (tmp)
19169             {
19170                 delete mark_stack_array;
19171                 mark_stack_array = tmp;
19172                 mark_stack_array_length = new_size;
19173             }
19174         }
19175
19176         uint8_t*  min_add = min_overflow_address;
19177         uint8_t*  max_add = max_overflow_address;
19178         max_overflow_address = 0;
19179         min_overflow_address = MAX_PTR;
19180         process_mark_overflow_internal (condemned_gen_number, min_add, max_add);
19181         goto recheck;
19182     }
19183
19184     size_t current_promoted_bytes = promoted_bytes (heap_number);
19185
19186     if (current_promoted_bytes != last_promoted_bytes)
19187         fire_mark_event (heap_number, ETW::GC_ROOT_OVERFLOW, (current_promoted_bytes - last_promoted_bytes));
19188     return overflow_p;
19189 }
19190
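// Foreground counterpart of the background overflow processing: walks the condemned
// generation's segments (plus the LOH segments on a full GC) on every heap and re-marks
// through the marked objects that fall inside the [min_add, max_add] overflow range.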
19191 void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
19192                                               uint8_t* min_add, uint8_t* max_add)
19193 {
19194 #ifdef MULTIPLE_HEAPS
19195     int thread = heap_number;
19196 #endif //MULTIPLE_HEAPS
19197     BOOL  full_p = (condemned_gen_number == max_generation);
19198
19199     dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
19200 #ifdef MULTIPLE_HEAPS
19201     for (int hi = 0; hi < n_heaps; hi++)
19202     {
19203         gc_heap*  hp = g_heaps [(heap_number + hi) % n_heaps];
19204
19205 #else
19206     {
19207         gc_heap*  hp = 0;
19208
19209 #endif //MULTIPLE_HEAPS
19210         BOOL small_object_segments = TRUE;
19211         int align_const = get_alignment_constant (small_object_segments);
19212         generation* gen = hp->generation_of (condemned_gen_number);
19213         heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
19214         
19215         PREFIX_ASSUME(seg != NULL);
19216         uint8_t*  o = max (heap_segment_mem (seg), min_add);
19217         while (1)
19218         {
19219             uint8_t*  end = heap_segment_allocated (seg);
19220
19221             while ((o < end) && (o <= max_add))
19222             {
19223                 assert ((min_add <= o) && (max_add >= o));
19224                 dprintf (3, ("considering %Ix", (size_t)o));
19225                 if (marked (o))
19226                 {
19227                     mark_through_object (o, TRUE THREAD_NUMBER_ARG);
19228                 }
19229
19230                 o = o + Align (size (o), align_const);
19231             }
19232
19233             if (( seg = heap_segment_next_in_range (seg)) == 0)
19234             {
19235                 if (small_object_segments && full_p)
19236                 {
19237                     small_object_segments = FALSE;
19238                     align_const = get_alignment_constant (small_object_segments);
19239                     seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));
19240
19241                     PREFIX_ASSUME(seg != NULL);
19242
19243                     o = max (heap_segment_mem (seg), min_add);
19244                     continue;
19245                 }
19246                 else
19247                 {
19248                     break;
19249                 } 
19250             } 
19251             else
19252             {
19253                 o = max (heap_segment_mem (seg), min_add);
19254                 continue;
19255             }
19256         }
19257     }
19258 }
19259
19260 // Scanning dependent handles for promotion needs special handling. Because the primary holds a strong
19261 // reference to the secondary (when the primary itself is reachable), and this can cause a cascading series of
19262 // promotions (the secondary of one handle is, or promotes, the primary of another), we might need to perform the
19263 // promotion scan multiple times.
19264 // This helper encapsulates the logic to complete all dependent handle promotions when running a server GC. It
19265 // also has the effect of processing any mark stack overflow.
19266
19267 #ifdef MULTIPLE_HEAPS
19268 // When multiple heaps are enabled we must use a more complex algorithm in order to keep all the GC
19269 // worker threads synchronized. The algorithms are sufficiently divergent that we have different
19270 // implementations based on whether MULTIPLE_HEAPS is defined or not.
19271 //
19272 // Define some static variables used for synchronization in the method below. These should really be defined
19273 // locally but MSVC complains when the VOLATILE macro is expanded into an instantiation of the Volatile class.
19274 //
19275 // A note about the synchronization used within this method. Communication between the worker threads is
19276 // achieved via two shared booleans (defined below). These both act as latches that are transitioned only from
19277 // false -> true by unsynchronized code. They are only read or reset to false by a single thread under the
19278 // protection of a join.
19279 static VOLATILE(BOOL) s_fUnpromotedHandles = FALSE;
19280 static VOLATILE(BOOL) s_fUnscannedPromotions = FALSE;
19281 static VOLATILE(BOOL) s_fScanRequired;
19282 void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
19283 {
19284     // Whenever we call this method there may have been preceding object promotions. So set
19285     // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
19286     // based on how the scanning proceeded).
19287     s_fUnscannedPromotions = TRUE;
19288
19289     // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
19290     // the state of this thread's portion of the dependent handle table. That's because promotions on other
19291     // threads could cause handle promotions to become necessary here. Even if there are definitely no more
19292     // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
19293     // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
19294     // as all the others or they'll get out of step).
19295     while (true)
19296     {
19297         // The various worker threads are all currently racing in this code. We need to work out if at least
19298         // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the
19299         // dependent handle table when both of the following conditions apply:
19300         //  1) At least one (arbitrary) object might have been promoted since the last scan (because if this
19301         //     object happens to correspond to a primary in one of our handles we might potentially have to
19302         //     promote the associated secondary).
19303         //  2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
19304         //
19305         // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
19306         // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
19307         // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
19308         // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
19309         // follows below. Note that we can't read this outside of the join since on any iteration apart from
19310         // the first, threads will be racing between reading this value and completing their previous
19311         // iteration's table scan.
19312         //
19313         // The second condition is tracked by the dependent handle code itself on a per worker thread basis
19314         // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
19315         // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
19316         // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
19317         // we're safely joined.
19318         if (GCScan::GcDhUnpromotedHandlesExist(sc))
19319             s_fUnpromotedHandles = TRUE;
19320
19321         // Synchronize all the threads so we can read our state variables safely. The shared variable
19322         // s_fScanRequired, indicating whether we should scan the tables or terminate the loop, will be set by
19323         // a single thread inside the join.
19324         gc_t_join.join(this, gc_join_scan_dependent_handles);
19325         if (gc_t_join.joined())
19326         {
19327             // We're synchronized so it's safe to read our shared state variables. We update another shared
19328             // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
19329             // the loop. We scan if there has been at least one object promotion since last time and at least
19330             // one thread has a dependent handle table with a potential handle promotion possible.
19331             s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
19332
19333             // Reset our shared state variables (ready to be set again on this scan or with a good initial
19334             // value for the next call if we're terminating the loop).
19335             s_fUnscannedPromotions = FALSE;
19336             s_fUnpromotedHandles = FALSE;
19337
19338             if (!s_fScanRequired)
19339             {
19340                 // We're terminating the loop. Perform any last operations that require single threaded access.
19341                 if (!initial_scan_p)
19342                 {
19343                     // On the second invocation we reconcile all mark overflow ranges across the heaps. This can help
19344                     // load balance if some of the heaps have an abnormally large workload.
19345                     uint8_t* all_heaps_max = 0;
19346                     uint8_t* all_heaps_min = MAX_PTR;
19347                     int i;
19348                     for (i = 0; i < n_heaps; i++)
19349                     {
19350                         if (all_heaps_max < g_heaps[i]->max_overflow_address)
19351                             all_heaps_max = g_heaps[i]->max_overflow_address;
19352                         if (all_heaps_min > g_heaps[i]->min_overflow_address)
19353                             all_heaps_min = g_heaps[i]->min_overflow_address;
19354                     }
19355                     for (i = 0; i < n_heaps; i++)
19356                     {
19357                         g_heaps[i]->max_overflow_address = all_heaps_max;
19358                         g_heaps[i]->min_overflow_address = all_heaps_min;
19359                     }
19360                 }
19361             }
19362
19363             // Restart all the workers.
19364             dprintf(3, ("Starting all gc thread mark stack overflow processing"));
19365             gc_t_join.restart();
19366         }
19367
19368         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
19369         // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
19370         // global flag indicating that at least one object promotion may have occurred (the usual comment
19371         // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
19372         // exit the method since we unconditionally set this variable on method entry anyway).
19373         if (process_mark_overflow(condemned_gen_number))
19374             s_fUnscannedPromotions = TRUE;
19375
19376         // If we decided that no scan was required we can terminate the loop now.
19377         if (!s_fScanRequired)
19378             break;
19379
19380         // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
19381         // processed before we start scanning dependent handle tables (if overflows remain while we scan we
19382         // could miss noting the promotion of some primary objects).
19383         gc_t_join.join(this, gc_join_rescan_dependent_handles);
19384         if (gc_t_join.joined())
19385         {
19386             // Restart all the workers.
19387             dprintf(3, ("Starting all gc thread for dependent handle promotion"));
19388             gc_t_join.restart();
19389         }
19390
19391         // If the portion of the dependent handle table managed by this worker has handles that could still be
19392         // promoted, perform a rescan. If the rescan resulted in at least one promotion, note this fact since it
19393         // could require a rescan of handles on this or other workers.
19394         if (GCScan::GcDhUnpromotedHandlesExist(sc))
19395             if (GCScan::GcDhReScan(sc))
19396                 s_fUnscannedPromotions = TRUE;
19397     }
19398 }
19399 #else //MULTIPLE_HEAPS
19400 // Non-multiple heap version of scan_dependent_handles: much simpler without the need to keep multiple worker
19401 // threads synchronized.
19402 void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
19403 {
19404     UNREFERENCED_PARAMETER(initial_scan_p);
19405
19406     // Whenever we call this method there may have been preceding object promotions. So set
19407     // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
19408     // based on how the scanning proceeded).
19409     bool fUnscannedPromotions = true;
19410
19411     // Loop until there are either no more dependent handles that can have their secondary promoted or we've
19412     // managed to perform a scan without promoting anything new.
19413     while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
19414     {
19415         // On each iteration of the loop start with the assumption that no further objects have been promoted.
19416         fUnscannedPromotions = false;
19417
19418         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
19419         // being visible. If there was an overflow (process_mark_overflow returned true) then additional
19420         // objects now appear to be promoted and we should set the flag.
19421         if (process_mark_overflow(condemned_gen_number))
19422             fUnscannedPromotions = true;
19423
19424         // Perform the scan and set the flag if any promotions resulted.
19425         if (GCScan::GcDhReScan(sc))
19426             fUnscannedPromotions = true;
19427     }
19428
19429     // Process any mark stack overflow that may have resulted from scanning handles (or if we didn't need to
19430     // scan any handles at all this is the processing of overflows that may have occurred prior to this method
19431     // invocation).
19432     process_mark_overflow(condemned_gen_number);
19433 }
19434 #endif //MULTIPLE_HEAPS
19435
19436 void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
19437 {
19438     assert (settings.concurrent == FALSE);
19439
19440     ScanContext sc;
19441     sc.thread_number = heap_number;
19442     sc.promotion = TRUE;
19443     sc.concurrent = FALSE;
19444
19445     dprintf(2,("---- Mark Phase condemning %d ----", condemned_gen_number));
19446     BOOL  full_p = (condemned_gen_number == max_generation);
19447
19448 #ifdef TIME_GC
19449     unsigned start;
19450     unsigned finish;
19451     start = GetCycleCount32();
19452 #endif //TIME_GC
19453
19454     int gen_to_init = condemned_gen_number;
19455     if (condemned_gen_number == max_generation)
19456     {
19457         gen_to_init = max_generation + 1;
19458     }
19459     for (int gen_idx = 0; gen_idx <= gen_to_init; gen_idx++)
19460     {
19461         dynamic_data* dd = dynamic_data_of (gen_idx);
19462         dd_begin_data_size (dd) = generation_size (gen_idx) - 
19463                                    dd_fragmentation (dd) -
19464                                    Align (size (generation_allocation_start (generation_of (gen_idx))));
19465         dprintf (2, ("begin data size for gen%d is %Id", gen_idx, dd_begin_data_size (dd)));
19466         dd_survived_size (dd) = 0;
19467         dd_pinned_survived_size (dd) = 0;
19468         dd_artificial_pinned_survived_size (dd) = 0;
19469         dd_added_pinned_size (dd) = 0;
19470 #ifdef SHORT_PLUGS
19471         dd_padding_size (dd) = 0;
19472 #endif //SHORT_PLUGS
19473 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
19474         dd_num_npinned_plugs (dd) = 0;
19475 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
19476     }
19477
19478 #ifdef FFIND_OBJECT
19479     if (gen0_must_clear_bricks > 0)
19480         gen0_must_clear_bricks--;
19481 #endif //FFIND_OBJECT
19482
19483     size_t last_promoted_bytes = 0;
19484
19485     promoted_bytes (heap_number) = 0;
19486     reset_mark_stack();
19487
19488 #ifdef SNOOP_STATS
19489     memset (&snoop_stat, 0, sizeof(snoop_stat));
19490     snoop_stat.heap_index = heap_number;
19491 #endif //SNOOP_STATS
19492
19493 #ifdef MH_SC_MARK
19494     if (full_p)
19495     {
19496         //initialize the mark stack
19497         for (int i = 0; i < max_snoop_level; i++)
19498         {
19499             ((uint8_t**)(mark_stack_array))[i] = 0;
19500         }
19501
19502         mark_stack_busy() = 1;
19503     }
19504 #endif //MH_SC_MARK
19505
19506     static uint32_t num_sizedrefs = 0;
19507
19508 #ifdef MH_SC_MARK
19509     static BOOL do_mark_steal_p = FALSE;
19510 #endif //MH_SC_MARK
19511
19512 #ifdef MULTIPLE_HEAPS
19513     gc_t_join.join(this, gc_join_begin_mark_phase);
19514     if (gc_t_join.joined())
19515     {
19516 #endif //MULTIPLE_HEAPS
19517
19518         num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
19519
19520 #ifdef MULTIPLE_HEAPS
19521
19522 #ifdef MH_SC_MARK
19523         if (full_p)
19524         {
19525             size_t total_heap_size = get_total_heap_size();
19526
19527             if (total_heap_size > (100 * 1024 * 1024))
19528             {
19529                 do_mark_steal_p = TRUE;
19530             }
19531             else
19532             {
19533                 do_mark_steal_p = FALSE;
19534             }
19535         }
19536         else
19537         {
19538             do_mark_steal_p = FALSE;
19539         }
19540 #endif //MH_SC_MARK
19541
19542         gc_t_join.restart();
19543     }
19544 #endif //MULTIPLE_HEAPS
19545
19546     {
19547
19548 #ifdef MARK_LIST
19549         //set up the mark lists from g_mark_list
19550         assert (g_mark_list);
19551 #ifdef MULTIPLE_HEAPS
19552         mark_list = &g_mark_list [heap_number*mark_list_size];
19553 #else
19554         mark_list = g_mark_list;
19555 #endif //MULTIPLE_HEAPS
19556         //don't use the mark list for a full gc
19557         //because multiple segments are more complex to handle and the list
19558         //is likely to overflow
19559         if (condemned_gen_number != max_generation)
19560             mark_list_end = &mark_list [mark_list_size-1];
19561         else
19562             mark_list_end = &mark_list [0];
19563         mark_list_index = &mark_list [0];
19564 #endif //MARK_LIST
19565
19566         shigh = (uint8_t*) 0;
19567         slow  = MAX_PTR;
19568
19569         //%type%  category = quote (mark);
19570
19571         if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
19572         {
19573             GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
19574             fire_mark_event (heap_number, ETW::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
19575             last_promoted_bytes = promoted_bytes (heap_number);
19576
19577 #ifdef MULTIPLE_HEAPS
19578             gc_t_join.join(this, gc_join_scan_sizedref_done);
19579             if (gc_t_join.joined())
19580             {
19581                 dprintf(3, ("Done with marking all sized refs. Starting all gc thread for marking other strong roots"));
19582                 gc_t_join.restart();
19583             }
19584 #endif //MULTIPLE_HEAPS
19585         }
19586     
19587         dprintf(3,("Marking Roots"));
19588
19589         GCScan::GcScanRoots(GCHeap::Promote,
19590                                 condemned_gen_number, max_generation,
19591                                 &sc);
19592
19593         fire_mark_event (heap_number, ETW::GC_ROOT_STACK, (promoted_bytes (heap_number) - last_promoted_bytes));
19594         last_promoted_bytes = promoted_bytes (heap_number);
19595
19596 #ifdef BACKGROUND_GC
19597         if (recursive_gc_sync::background_running_p())
19598         {
19599             scan_background_roots (GCHeap::Promote, heap_number, &sc);
19600         }
19601 #endif //BACKGROUND_GC
19602
19603 #ifdef FEATURE_PREMORTEM_FINALIZATION
19604         dprintf(3, ("Marking finalization data"));
19605         finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0);
19606 #endif // FEATURE_PREMORTEM_FINALIZATION
19607
19608         fire_mark_event (heap_number, ETW::GC_ROOT_FQ, (promoted_bytes (heap_number) - last_promoted_bytes));
19609         last_promoted_bytes = promoted_bytes (heap_number);
19610
19611 // MTHTS
19612         {
19613
19614             dprintf(3,("Marking handle table"));
19615             GCScan::GcScanHandles(GCHeap::Promote,
19616                                       condemned_gen_number, max_generation,
19617                                       &sc);
19618             fire_mark_event (heap_number, ETW::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
19619             last_promoted_bytes = promoted_bytes (heap_number);
19620         }
19621
19622 #ifdef TRACE_GC
19623         size_t promoted_before_cards = promoted_bytes (heap_number);
19624 #endif //TRACE_GC
19625
19626         dprintf (3, ("before cards: %Id", promoted_before_cards));
19627         if (!full_p)
19628         {
19629 #ifdef CARD_BUNDLE
19630 #ifdef MULTIPLE_HEAPS
19631             if (gc_t_join.r_join(this, gc_r_join_update_card_bundle))
19632             {
19633 #endif //MULTIPLE_HEAPS
19634
19635 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
19636                 // If we are manually managing card bundles, every write to the card table should already be
19637                 // accounted for in the card bundle table so there's nothing to update here.
19638                 update_card_table_bundle();
19639 #endif // FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
19640                 if (card_bundles_enabled())
19641                 {
19642                     verify_card_bundles();
19643                 }
19644
19645 #ifdef MULTIPLE_HEAPS
19646                 gc_t_join.r_restart();
19647             }
19648 #endif //MULTIPLE_HEAPS
19649 #endif //CARD_BUNDLE
19650
19651             card_fn mark_object_fn = &gc_heap::mark_object_simple;
19652 #ifdef HEAP_ANALYZE
19653             heap_analyze_success = TRUE;
19654             if (heap_analyze_enabled)
19655             {
19656                 internal_root_array_index = 0;
19657                 current_obj = 0;
19658                 current_obj_size = 0;
19659                 mark_object_fn = &gc_heap::ha_mark_object_simple;
19660             }
19661 #endif //HEAP_ANALYZE
19662
19663             dprintf(3,("Marking cross generation pointers"));
19664             mark_through_cards_for_segments (mark_object_fn, FALSE);
19665
19666             dprintf(3,("Marking cross generation pointers for large objects"));
19667             mark_through_cards_for_large_objects (mark_object_fn, FALSE);
19668
19669             dprintf (3, ("marked by cards: %Id", 
19670                 (promoted_bytes (heap_number) - promoted_before_cards)));
19671             fire_mark_event (heap_number, ETW::GC_ROOT_OLDER, (promoted_bytes (heap_number) - last_promoted_bytes));
19672             last_promoted_bytes = promoted_bytes (heap_number);
19673         }
19674     }
19675
19676 #ifdef MH_SC_MARK
19677     if (do_mark_steal_p)
19678     {
19679         mark_steal();
19680     }
19681 #endif //MH_SC_MARK
19682
19683     // Dependent handles need to be scanned with a special algorithm (see the header comment on
19684     // scan_dependent_handles for more detail). We perform an initial scan without synchronizing with other
19685     // worker threads or processing any mark stack overflow. This is not guaranteed to complete the operation
19686     // but in a common case (where there are no dependent handles that are due to be collected) it allows us
19687     // to optimize away further scans. The call to scan_dependent_handles is what will cycle through more
19688     // iterations if required and will also perform processing of any mark stack overflow once the dependent
19689     // handle table has been fully promoted.
19690     GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
19691     scan_dependent_handles(condemned_gen_number, &sc, true);
19692
19693 #ifdef MULTIPLE_HEAPS
19694     dprintf(3, ("Joining for short weak handle scan"));
19695     gc_t_join.join(this, gc_join_null_dead_short_weak);
19696     if (gc_t_join.joined())
19697 #endif //MULTIPLE_HEAPS
19698     {
19699 #ifdef HEAP_ANALYZE
19700         heap_analyze_enabled = FALSE;
19701         GCToEEInterface::AnalyzeSurvivorsFinished(condemned_gen_number);
19702 #endif // HEAP_ANALYZE
19703         GCToEEInterface::AfterGcScanRoots (condemned_gen_number, max_generation, &sc);
19704
19705 #ifdef MULTIPLE_HEAPS
19706         if (!full_p)
19707         {
19708             // we used r_join and need to reinitialize states for it here.
19709             gc_t_join.r_init();
19710         }
19711
19712         //start all threads on the roots.
19713         dprintf(3, ("Starting all gc thread for short weak handle scan"));
19714         gc_t_join.restart();
19715 #endif //MULTIPLE_HEAPS
19716
19717     }
19718
19719     // null out the target of short weakrefs that were not promoted.
19720     GCScan::GcShortWeakPtrScan(GCHeap::Promote, condemned_gen_number, max_generation,&sc);
19721
19722 // MTHTS: keep by single thread
19723 #ifdef MULTIPLE_HEAPS
19724     dprintf(3, ("Joining for finalization"));
19725     gc_t_join.join(this, gc_join_scan_finalization);
19726     if (gc_t_join.joined())
19727 #endif //MULTIPLE_HEAPS
19728
19729     {
19730 #ifdef MULTIPLE_HEAPS
19731         //start all threads on the roots.
19732         dprintf(3, ("Starting all gc thread for Finalization"));
19733         gc_t_join.restart();
19734 #endif //MULTIPLE_HEAPS
19735     }
19736
19737     //Handle finalization.
19738     size_t promoted_bytes_live = promoted_bytes (heap_number);
19739
19740 #ifdef FEATURE_PREMORTEM_FINALIZATION
19741     dprintf (3, ("Finalize marking"));
19742     finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);
19743
19744     GCToEEInterface::DiagWalkFReachableObjects(__this);
19745 #endif // FEATURE_PREMORTEM_FINALIZATION
19746
19747     // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
19748     // for finalization. As before scan_dependent_handles will also process any mark stack overflow.
19749     scan_dependent_handles(condemned_gen_number, &sc, false);
19750
19751 #ifdef MULTIPLE_HEAPS
19752     dprintf(3, ("Joining for weak pointer deletion"));
19753     gc_t_join.join(this, gc_join_null_dead_long_weak);
19754     if (gc_t_join.joined())
19755     {
19756         //start all threads on the roots.
19757         dprintf(3, ("Starting all gc thread for weak pointer deletion"));
19758         gc_t_join.restart();
19759     }
19760 #endif //MULTIPLE_HEAPS
19761
19762     // null out the target of long weakrefs that were not promoted.
19763     GCScan::GcWeakPtrScan (GCHeap::Promote, condemned_gen_number, max_generation, &sc);
19764
19765 // MTHTS: keep by single thread
19766 #ifdef MULTIPLE_HEAPS
19767 #ifdef MARK_LIST
19768 #ifdef PARALLEL_MARK_LIST_SORT
19769 //    unsigned long start = GetCycleCount32();
19770     sort_mark_list();
19771 //    printf("sort_mark_list took %u cycles\n", GetCycleCount32() - start);
19772 #endif //PARALLEL_MARK_LIST_SORT
19773 #endif //MARK_LIST
19774
19775     dprintf (3, ("Joining for sync block cache entry scanning"));
19776     gc_t_join.join(this, gc_join_null_dead_syncblk);
19777     if (gc_t_join.joined())
19778 #endif //MULTIPLE_HEAPS
19779     {
19780         // scan for deleted entries in the syncblk cache
19781         GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
19782
19783 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
19784         if (g_fEnableARM)
19785         {
19786             size_t promoted_all_heaps = 0;
19787 #ifdef MULTIPLE_HEAPS
19788             for (int i = 0; i < n_heaps; i++)
19789             {
19790                 promoted_all_heaps += promoted_bytes (i);
19791             }
19792 #else
19793             promoted_all_heaps = promoted_bytes (heap_number);
19794 #endif //MULTIPLE_HEAPS
19795             SystemDomain::RecordTotalSurvivedBytes (promoted_all_heaps);
19796         }
19797 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
19798
19799 #ifdef MULTIPLE_HEAPS
19800
19801 #ifdef MARK_LIST
19802 #ifndef PARALLEL_MARK_LIST_SORT
19803         //compact g_mark_list and sort it.
19804         combine_mark_lists();
19805 #endif //PARALLEL_MARK_LIST_SORT
19806 #endif //MARK_LIST
19807
19808         //decide on promotion
19809         if (!settings.promotion)
19810         {
19811             size_t m = 0;
19812             for (int n = 0; n <= condemned_gen_number;n++)
19813             {
19814                 m +=  (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.1);
19815             }
19816
19817             for (int i = 0; i < n_heaps; i++)
19818             {
19819                 dynamic_data* dd = g_heaps[i]->dynamic_data_of (min (condemned_gen_number +1,
19820                                                                      max_generation));
19821                 size_t older_gen_size = (dd_current_size (dd) +
19822                                          (dd_desired_allocation (dd) -
19823                                          dd_new_allocation (dd)));
19824
19825                 if ((m > (older_gen_size)) ||
19826                     (promoted_bytes (i) > m))
19827                 {
19828                     settings.promotion = TRUE;
19829                 }
19830             }
19831         }
19832
19833 #ifdef SNOOP_STATS
19834         if (do_mark_steal_p)
19835         {
19836             size_t objects_checked_count = 0;
19837             size_t zero_ref_count = 0;
19838             size_t objects_marked_count = 0;
19839             size_t check_level_count = 0;
19840             size_t busy_count = 0;
19841             size_t interlocked_count = 0;
19842             size_t partial_mark_parent_count = 0;
19843             size_t stolen_or_pm_count = 0; 
19844             size_t stolen_entry_count = 0; 
19845             size_t pm_not_ready_count = 0; 
19846             size_t normal_count = 0;
19847             size_t stack_bottom_clear_count = 0;
19848
19849             for (int i = 0; i < n_heaps; i++)
19850             {
19851                 gc_heap* hp = g_heaps[i];
19852                 hp->print_snoop_stat();
19853                 objects_checked_count += hp->snoop_stat.objects_checked_count;
19854                 zero_ref_count += hp->snoop_stat.zero_ref_count;
19855                 objects_marked_count += hp->snoop_stat.objects_marked_count;
19856                 check_level_count += hp->snoop_stat.check_level_count;
19857                 busy_count += hp->snoop_stat.busy_count;
19858                 interlocked_count += hp->snoop_stat.interlocked_count;
19859                 partial_mark_parent_count += hp->snoop_stat.partial_mark_parent_count;
19860                 stolen_or_pm_count += hp->snoop_stat.stolen_or_pm_count;
19861                 stolen_entry_count += hp->snoop_stat.stolen_entry_count;
19862                 pm_not_ready_count += hp->snoop_stat.pm_not_ready_count;
19863                 normal_count += hp->snoop_stat.normal_count;
19864                 stack_bottom_clear_count += hp->snoop_stat.stack_bottom_clear_count;
19865             }
19866
19867             fflush (stdout);
19868
19869             printf ("-------total stats-------\n");
19870             printf ("%8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n", 
19871                 "checked", "zero", "marked", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
19872             printf ("%8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
19873                 objects_checked_count,
19874                 zero_ref_count,
19875                 objects_marked_count,
19876                 check_level_count,
19877                 busy_count,
19878                 interlocked_count,
19879                 partial_mark_parent_count,
19880                 stolen_or_pm_count,
19881                 stolen_entry_count,
19882                 pm_not_ready_count,
19883                 normal_count,
19884                 stack_bottom_clear_count);
19885         }
19886 #endif //SNOOP_STATS
19887
19888         //start all threads.
19889         dprintf(3, ("Starting all threads for end of mark phase"));
19890         gc_t_join.restart();
19891 #else //MULTIPLE_HEAPS
19892
19893         //decide on promotion
19894         if (!settings.promotion)
19895         {
19896             size_t m = 0;
19897             for (int n = 0; n <= condemned_gen_number;n++)
19898             {
19899                 m +=  (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.06);
19900             }
19901             dynamic_data* dd = dynamic_data_of (min (condemned_gen_number +1,
19902                                                      max_generation));
19903             size_t older_gen_size = (dd_current_size (dd) +
19904                                      (dd_desired_allocation (dd) -
19905                                      dd_new_allocation (dd)));
19906
19907             dprintf (2, ("promotion threshold: %Id, promoted bytes: %Id size n+1: %Id",
19908                          m, promoted_bytes (heap_number), older_gen_size));
19909
19910             if ((m > older_gen_size) ||
19911                     (promoted_bytes (heap_number) > m))
19912             {
19913                 settings.promotion = TRUE;
19914             }
19915         }
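        // A hypothetical illustration of the threshold above: with condemned_gen_number == 1 and
        // dd_min_size of roughly 256KB for both gen0 and gen1, m ~= 0.06 * (256KB*1 + 256KB*2) ~= 46KB.
        // Promotion is then forced either because the next older generation is still smaller than that
        // threshold or because more than that many bytes were promoted during this mark phase.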
19916
19917 #endif //MULTIPLE_HEAPS
19918     }
19919
19920 #ifdef MULTIPLE_HEAPS
19921 #ifdef MARK_LIST
19922 #ifdef PARALLEL_MARK_LIST_SORT
19923 //    start = GetCycleCount32();
19924     merge_mark_lists();
19925 //    printf("merge_mark_lists took %u cycles\n", GetCycleCount32() - start);
19926 #endif //PARALLEL_MARK_LIST_SORT
19927 #endif //MARK_LIST
19928 #endif //MULTIPLE_HEAPS
19929
19930 #ifdef BACKGROUND_GC
19931     total_promoted_bytes = promoted_bytes (heap_number);
19932 #endif //BACKGROUND_GC
19933
19934     promoted_bytes (heap_number) -= promoted_bytes_live;
19935
19936 #ifdef TIME_GC
19937     finish = GetCycleCount32();
19938     mark_time = finish - start;
19939 #endif //TIME_GC
19940
19941     dprintf(2,("---- End of mark phase ----"));
19942 }
19943
19944 inline
19945 void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high)
19946 {
19947     dprintf (3, ("Pinning %Ix", (size_t)o));
19948     if ((o >= low) && (o < high))
19949     {
19950         dprintf(3,("^%Ix^", (size_t)o));
19951         set_pinned (o);
19952
19953 #ifdef FEATURE_EVENT_TRACE        
19954         if(EVENT_ENABLED(PinObjectAtGCTime))
19955         {
19956             fire_etw_pin_object_event(o, ppObject);
19957         }
19958 #endif // FEATURE_EVENT_TRACE
19959
19960 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
19961         num_pinned_objects++;
19962 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
19963     }
19964 }
19965
19966 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
19967 size_t gc_heap::get_total_pinned_objects()
19968 {
19969 #ifdef MULTIPLE_HEAPS
19970     size_t total_num_pinned_objects = 0;
19971     for (int i = 0; i < gc_heap::n_heaps; i++)
19972     {
19973         gc_heap* hp = gc_heap::g_heaps[i];
19974         total_num_pinned_objects += hp->num_pinned_objects;
19975     }
19976     return total_num_pinned_objects;
19977 #else //MULTIPLE_HEAPS
19978     return num_pinned_objects;
19979 #endif //MULTIPLE_HEAPS
19980 }
19981 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
19982
19983 void gc_heap::reset_mark_stack ()
19984 {
19985     reset_pinned_queue();
19986     max_overflow_address = 0;
19987     min_overflow_address = MAX_PTR;
19988 }
19989
19990 #ifdef FEATURE_STRUCTALIGN
19991 //
19992 // The word with left child, right child, and align info is laid out as follows:
19993 //
19994 //      |   upper short word   |   lower short word   |
19995 //      |<------------> <----->|<------------> <----->|
19996 //      |  left child   info hi| right child   info lo|
19997 // x86: |    10 bits     6 bits|   10 bits      6 bits|
19998 //
19999 // where left/right child are signed values and concat(info hi, info lo) is unsigned.
20000 //
20001 // The "align info" encodes two numbers: the required alignment (a power of two)
20002 // and the misalignment (the number of machine words the destination address needs
20003 // to be adjusted by to provide alignment - so this number is always smaller than
20004 // the required alignment).  Thus, the two can be represented as the "logical or"
20005 // of the two numbers.  Note that the actual pad is computed from the misalignment
20006 // by adding the alignment iff the misalignment is non-zero and less than min_obj_size.
20007 //
20008
20009 // The number of bits in a brick.
20010 #if defined (_TARGET_AMD64_)
20011 #define brick_bits (12)
20012 #else
20013 #define brick_bits (11)
20014 #endif //_TARGET_AMD64_
20015 C_ASSERT(brick_size == (1 << brick_bits));
20016
20017 // The number of bits needed to represent the offset to a child node.
20018 // "brick_bits + 1" allows us to represent a signed offset within a brick.
20019 #define child_bits (brick_bits + 1 - LOG2_PTRSIZE)
20020
20021 // The number of bits in each of the pad hi, pad lo fields.
20022 #define pad_bits (sizeof(short) * 8 - child_bits)
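// For example, on AMD64 (brick_bits == 12, LOG2_PTRSIZE == 3) this gives child_bits == 10 and
// pad_bits == 6, and on x86 (brick_bits == 11, LOG2_PTRSIZE == 2) it gives the same 10/6 split,
// which is the layout pictured in the diagram above.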
20023
20024 #define child_from_short(w) (((signed short)(w) / (1 << (pad_bits - LOG2_PTRSIZE))) & ~((1 << LOG2_PTRSIZE) - 1))
20025 #define pad_mask ((1 << pad_bits) - 1)
20026 #define pad_from_short(w) ((size_t)(w) & pad_mask)
20027 #else // FEATURE_STRUCTALIGN
20028 #define child_from_short(w) (w)
20029 #endif // FEATURE_STRUCTALIGN
20030
20031 inline
20032 short node_left_child(uint8_t* node)
20033 {
20034     return child_from_short(((plug_and_pair*)node)[-1].m_pair.left);
20035 }
20036
20037 inline
20038 void set_node_left_child(uint8_t* node, ptrdiff_t val)
20039 {
20040     assert (val > -(ptrdiff_t)brick_size);
20041     assert (val < (ptrdiff_t)brick_size);
20042     assert (Aligned (val));
20043 #ifdef FEATURE_STRUCTALIGN
20044     size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.left);
20045     ((plug_and_pair*)node)[-1].m_pair.left = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
20046 #else // FEATURE_STRUCTALIGN
20047     ((plug_and_pair*)node)[-1].m_pair.left = (short)val;
20048 #endif // FEATURE_STRUCTALIGN
20049     assert (node_left_child (node) == val);
20050 }
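// For instance, a left child that starts 48 bytes before the node is recorded as the offset -48:
// without FEATURE_STRUCTALIGN it is stored in the short directly, and with FEATURE_STRUCTALIGN it is
// shifted up by (pad_bits - LOG2_PTRSIZE) so the low bits stay free for the align info; either way
// child_from_short() recovers -48 because child offsets are pointer-aligned.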
20051
20052 inline
20053 short node_right_child(uint8_t* node)
20054 {
20055     return child_from_short(((plug_and_pair*)node)[-1].m_pair.right);
20056 }
20057
20058 inline
20059 void set_node_right_child(uint8_t* node, ptrdiff_t val)
20060 {
20061     assert (val > -(ptrdiff_t)brick_size);
20062     assert (val < (ptrdiff_t)brick_size);
20063     assert (Aligned (val));
20064 #ifdef FEATURE_STRUCTALIGN
20065     size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.right);
20066     ((plug_and_pair*)node)[-1].m_pair.right = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
20067 #else // FEATURE_STRUCTALIGN
20068     ((plug_and_pair*)node)[-1].m_pair.right = (short)val;
20069 #endif // FEATURE_STRUCTALIGN
20070     assert (node_right_child (node) == val);
20071 }
20072
20073 #ifdef FEATURE_STRUCTALIGN
20074 void node_aligninfo (uint8_t* node, int& requiredAlignment, ptrdiff_t& pad)
20075 {
20076     // Extract the single-number aligninfo from the fields.
20077     short left = ((plug_and_pair*)node)[-1].m_pair.left;
20078     short right = ((plug_and_pair*)node)[-1].m_pair.right;
20079     ptrdiff_t pad_shifted = (pad_from_short(left) << pad_bits) | pad_from_short(right);
20080     ptrdiff_t aligninfo = pad_shifted * DATA_ALIGNMENT;
20081
20082     // Replicate the topmost bit into all lower bits.
20083     ptrdiff_t x = aligninfo;
20084     x |= x >> 8;
20085     x |= x >> 4;
20086     x |= x >> 2;
20087     x |= x >> 1;
20088
20089     // Clear all bits but the highest.
20090     requiredAlignment = (int)(x ^ (x >> 1));
20091     pad = aligninfo - requiredAlignment;
20092     pad += AdjustmentForMinPadSize(pad, requiredAlignment);
20093 }
20094
20095 inline
20096 ptrdiff_t node_alignpad (uint8_t* node)
20097 {
20098     int requiredAlignment;
20099     ptrdiff_t alignpad;
20100     node_aligninfo (node, requiredAlignment, alignpad);
20101     return alignpad;
20102 }
20103
20104 void clear_node_aligninfo (uint8_t* node)
20105 {
20106     ((plug_and_pair*)node)[-1].m_pair.left &= ~0 << pad_bits;
20107     ((plug_and_pair*)node)[-1].m_pair.right &= ~0 << pad_bits;
20108 }
20109
20110 void set_node_aligninfo (uint8_t* node, int requiredAlignment, ptrdiff_t pad)
20111 {
20112     // Encode the alignment requirement and alignment offset as a single number
20113     // as described above.
20114     ptrdiff_t aligninfo = (size_t)requiredAlignment + (pad & (requiredAlignment-1));
20115     assert (Aligned (aligninfo));
20116     ptrdiff_t aligninfo_shifted = aligninfo / DATA_ALIGNMENT;
20117     assert (aligninfo_shifted < (1 << (pad_bits + pad_bits)));
20118
20119     ptrdiff_t hi = aligninfo_shifted >> pad_bits;
20120     assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.left) == 0);
20121     ((plug_and_pair*)node)[-1].m_pair.left |= hi;
20122
20123     ptrdiff_t lo = aligninfo_shifted & pad_mask;
20124     assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.right) == 0);
20125     ((plug_and_pair*)node)[-1].m_pair.right |= lo;
20126
20127 #ifdef _DEBUG
20128     int requiredAlignment2;
20129     ptrdiff_t pad2;
20130     node_aligninfo (node, requiredAlignment2, pad2);
20131     assert (requiredAlignment == requiredAlignment2);
20132     assert (pad == pad2);
20133 #endif // _DEBUG
20134 }
20135 #endif // FEATURE_STRUCTALIGN
20136
20137 inline
20138 void loh_set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
20139 {
20140     ptrdiff_t* place = &(((loh_obj_and_pad*)node)[-1].reloc);
20141     *place = val;
20142 }
20143
20144 inline
20145 ptrdiff_t loh_node_relocation_distance(uint8_t* node)
20146 {
20147     return (((loh_obj_and_pad*)node)[-1].reloc);
20148 }
20149
20150 inline
20151 ptrdiff_t node_relocation_distance (uint8_t* node)
20152 {
20153     return (((plug_and_reloc*)(node))[-1].reloc & ~3);
20154 }
20155
20156 inline
20157 void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
20158 {
20159     assert (val == (val & ~3));
20160     ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
20161     //clear the left bit and the relocation field
20162     *place &= 1;
20163     // store the value
20164     *place |= val;
20165 }
20166
20167 #define node_left_p(node) (((plug_and_reloc*)(node))[-1].reloc & 2)
20168
20169 #define set_node_left(node) ((plug_and_reloc*)(node))[-1].reloc |= 2;
20170
20171 #ifndef FEATURE_STRUCTALIGN
20172 void set_node_realigned(uint8_t* node)
20173 {
20174     ((plug_and_reloc*)(node))[-1].reloc |= 1;
20175 }
20176
20177 void clear_node_realigned(uint8_t* node)
20178 {
20179 #ifdef RESPECT_LARGE_ALIGNMENT
20180     ((plug_and_reloc*)(node))[-1].reloc &= ~1;
20181 #else
20182     UNREFERENCED_PARAMETER(node);
20183 #endif //RESPECT_LARGE_ALIGNMENT
20184 }
20185 #endif // FEATURE_STRUCTALIGN
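// To summarize the reloc encoding used by the helpers above: the low two bits of the reloc word act
// as flags (bit 1 marks the node as a left child and, when FEATURE_STRUCTALIGN is not defined, bit 0
// records that the plug was realigned) while the remaining bits hold the relocation distance, which
// is why set_node_relocation_distance requires a value whose low two bits are clear.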
20186
20187 inline
20188 size_t  node_gap_size (uint8_t* node)
20189 {
20190     return ((plug_and_gap *)node)[-1].gap;
20191 }
20192
20193 void set_gap_size (uint8_t* node, size_t size)
20194 {
20195     assert (Aligned (size));
20196
20197     // clear the 2 uint32_t used by the node.
20198     ((plug_and_gap *)node)[-1].reloc = 0;
20199     ((plug_and_gap *)node)[-1].lr =0;
20200     ((plug_and_gap *)node)[-1].gap = size;
20201
20202     assert ((size == 0 )||(size >= sizeof(plug_and_reloc)));
20203
20204 }
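// A note on the layout used by the helpers above: a plug's bookkeeping (the gap size, the reloc word
// and the left/right child links) is kept in a small record (plug_and_gap / plug_and_reloc /
// plug_and_pair) occupying the bytes immediately before the plug itself, which is why every accessor
// indexes the node pointer with [-1].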
20205
20206 uint8_t* gc_heap::insert_node (uint8_t* new_node, size_t sequence_number,
20207                    uint8_t* tree, uint8_t* last_node)
20208 {
20209     dprintf (3, ("IN: %Ix(%Ix), T: %Ix(%Ix), L: %Ix(%Ix) [%Ix]",
20210                  (size_t)new_node, brick_of(new_node), 
20211                  (size_t)tree, brick_of(tree), 
20212                  (size_t)last_node, brick_of(last_node),
20213                  sequence_number));
20214     if (power_of_two_p (sequence_number))
20215     {
20216         set_node_left_child (new_node, (tree - new_node));
20217         dprintf (3, ("NT: %Ix, LC->%Ix", (size_t)new_node, (tree - new_node)));
20218         tree = new_node;
20219     }
20220     else
20221     {
20222         if (oddp (sequence_number))
20223         {
20224             set_node_right_child (last_node, (new_node - last_node));
20225             dprintf (3, ("%Ix RC->%Ix", last_node, (new_node - last_node)));
20226         }
20227         else
20228         {
20229             uint8_t*  earlier_node = tree;
20230             size_t imax = logcount(sequence_number) - 2;
20231             for (size_t i = 0; i != imax; i++)
20232             {
20233                 earlier_node = earlier_node + node_right_child (earlier_node);
20234             }
20235             int tmp_offset = node_right_child (earlier_node);
20236             assert (tmp_offset); // should never be empty
20237             set_node_left_child (new_node, ((earlier_node + tmp_offset ) - new_node));
20238             set_node_right_child (earlier_node, (new_node - earlier_node));
20239
20240             dprintf (3, ("%Ix LC->%Ix, %Ix RC->%Ix", 
20241                 new_node, ((earlier_node + tmp_offset ) - new_node),
20242                 earlier_node, (new_node - earlier_node)));
20243         }
20244     }
20245     return tree;
20246 }
20247
20248 size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick,
20249                                     uint8_t* x, uint8_t* plug_end)
20250 {
20251     dprintf (3, ("tree: %Ix, current b: %Ix, x: %Ix, plug_end: %Ix",
20252         tree, current_brick, x, plug_end));
20253
20254     if (tree != NULL)
20255     {
20256         dprintf (3, ("b- %Ix->%Ix pointing to tree %Ix", 
20257             current_brick, (size_t)(tree - brick_address (current_brick)), tree));
20258         set_brick (current_brick, (tree - brick_address (current_brick)));
20259     }
20260     else
20261     {
20262         dprintf (3, ("b- %Ix->-1", current_brick));
20263         set_brick (current_brick, -1);
20264     }
20265     size_t  b = 1 + current_brick;
20266     ptrdiff_t  offset = 0;
20267     size_t last_br = brick_of (plug_end-1);
20268     current_brick = brick_of (x-1);
20269     dprintf (3, ("ubt: %Ix->%Ix]->%Ix]", b, last_br, current_brick));
20270     while (b <= current_brick)
20271     {
20272         if (b <= last_br)
20273         {
20274             set_brick (b, --offset);
20275         }
20276         else
20277         {
20278             set_brick (b,-1);
20279         }
20280         b++;
20281     }
20282     return brick_of (x);
20283 }
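// Roughly, after this runs a brick entry that is nonnegative records where the plug tree for that
// brick starts (as an offset from the brick's base address), while a negative entry tells a reader to
// step back that many bricks to find the brick that holds the tree. For example (hypothetical), a
// plug that spans three bricks leaves the first brick pointing at the tree and the next two set to -1
// and -2 respectively.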
20284
20285 void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate)
20286 {
20287 #ifdef BIT64
20288     // We should never demote big plugs to gen0.
20289     if (gen == youngest_generation)
20290     {
20291         heap_segment* seg = ephemeral_heap_segment;
20292         size_t mark_stack_large_bos = mark_stack_bos;
20293         size_t large_plug_pos = 0;
20294         while (mark_stack_large_bos < mark_stack_tos)
20295         {
20296             if (mark_stack_array[mark_stack_large_bos].len > demotion_plug_len_th)
20297             {
20298                 while (mark_stack_bos <= mark_stack_large_bos)
20299                 {
20300                     size_t entry = deque_pinned_plug();
20301                     size_t len = pinned_len (pinned_plug_of (entry));
20302                     uint8_t* plug = pinned_plug (pinned_plug_of(entry));
20303                     if (len > demotion_plug_len_th)
20304                     {
20305                         dprintf (2, ("ps(%d): S %Ix (%Id)(%Ix)", gen->gen_num, plug, len, (plug+len)));
20306                     }
20307                     pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (consing_gen);
20308                     assert(mark_stack_array[entry].len == 0 ||
20309                             mark_stack_array[entry].len >= Align(min_obj_size));
20310                     generation_allocation_pointer (consing_gen) = plug + len;
20311                     generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (seg);
20312                     set_allocator_next_pin (consing_gen);
20313                 }
20314             }
20315
20316             mark_stack_large_bos++;
20317         }
20318     }
20319 #endif // BIT64
20320
20321     generation_plan_allocation_start (gen) =
20322         allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1);
20323     generation_plan_allocation_start_size (gen) = Align (min_obj_size);
20324     size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
20325     if (next_plug_to_allocate)
20326     {
20327         size_t dist_to_next_plug = (size_t)(next_plug_to_allocate - generation_allocation_pointer (consing_gen));
20328         if (allocation_left > dist_to_next_plug)
20329         {
20330             allocation_left = dist_to_next_plug;
20331         }
20332     }
20333     if (allocation_left < Align (min_obj_size))
20334     {
20335         generation_plan_allocation_start_size (gen) += allocation_left;
20336         generation_allocation_pointer (consing_gen) += allocation_left;
20337     }
20338
20339     dprintf (1, ("plan alloc gen%d(%Ix) start at %Ix (ptr: %Ix, limit: %Ix, next: %Ix)", gen->gen_num, 
20340         generation_plan_allocation_start (gen),
20341         generation_plan_allocation_start_size (gen),
20342         generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen),
20343         next_plug_to_allocate));
20344 }
20345
20346 void gc_heap::realloc_plan_generation_start (generation* gen, generation* consing_gen)
20347 {
20348     BOOL adjacentp = FALSE;
20349
20350     generation_plan_allocation_start (gen) =  
20351         allocate_in_expanded_heap (consing_gen, Align(min_obj_size), adjacentp, 0, 
20352 #ifdef SHORT_PLUGS
20353                                    FALSE, NULL, 
20354 #endif //SHORT_PLUGS
20355                                    FALSE, -1 REQD_ALIGN_AND_OFFSET_ARG);
20356
20357     generation_plan_allocation_start_size (gen) = Align (min_obj_size);
20358     size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
20359     if ((allocation_left < Align (min_obj_size)) && 
20360          (generation_allocation_limit (consing_gen)!=heap_segment_plan_allocated (generation_allocation_segment (consing_gen))))
20361     {
20362         generation_plan_allocation_start_size (gen) += allocation_left;
20363         generation_allocation_pointer (consing_gen) += allocation_left;
20364     }
20365
20366     dprintf (1, ("plan re-alloc gen%d start at %Ix (ptr: %Ix, limit: %Ix)", gen->gen_num, 
20367         generation_plan_allocation_start (gen),
20368         generation_allocation_pointer (consing_gen), 
20369         generation_allocation_limit (consing_gen))); 
20370 }
20371
20372 void gc_heap::plan_generation_starts (generation*& consing_gen)
20373 {
20374     //make sure that every generation has a planned allocation start
20375     int  gen_number = settings.condemned_generation;
20376     while (gen_number >= 0)
20377     {
20378         if (gen_number < max_generation)
20379         {
20380             consing_gen = ensure_ephemeral_heap_segment (consing_gen);
20381         }
20382         generation* gen = generation_of (gen_number);
20383         if (0 == generation_plan_allocation_start (gen))
20384         {
20385             plan_generation_start (gen, consing_gen, 0);
20386             assert (generation_plan_allocation_start (gen));
20387         }
20388         gen_number--;
20389     }
20390     // now we know the planned allocation size
20391     heap_segment_plan_allocated (ephemeral_heap_segment) =
20392         generation_allocation_pointer (consing_gen);
20393 }
20394
20395 void gc_heap::advance_pins_for_demotion (generation* gen)
20396 {
20397     uint8_t* original_youngest_start = generation_allocation_start (youngest_generation);
20398     heap_segment* seg = ephemeral_heap_segment;
20399
20400     if ((!(pinned_plug_que_empty_p())))
20401     {
20402         size_t gen1_pinned_promoted = generation_pinned_allocation_compact_size (generation_of (max_generation));
20403         size_t gen1_pins_left = dd_pinned_survived_size (dynamic_data_of (max_generation - 1)) - gen1_pinned_promoted;
20404         size_t total_space_to_skip = last_gen1_pin_end - generation_allocation_pointer (gen);
20405         float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip;
20406         float pin_surv_ratio = (float)gen1_pins_left / (float)(dd_survived_size (dynamic_data_of (max_generation - 1)));
20407         if ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30))
20408         {
20409             while (!pinned_plug_que_empty_p() &&
20410                     (pinned_plug (oldest_pin()) < original_youngest_start))
20411             {
20412                 size_t entry = deque_pinned_plug();
20413                 size_t len = pinned_len (pinned_plug_of (entry));
20414                 uint8_t* plug = pinned_plug (pinned_plug_of(entry));
20415                 pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (gen);
20416                 assert(mark_stack_array[entry].len == 0 ||
20417                         mark_stack_array[entry].len >= Align(min_obj_size));
20418                 generation_allocation_pointer (gen) = plug + len;
20419                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20420                 set_allocator_next_pin (gen);
20421
20422                 //Add the size of the pinned plug to the right pinned allocations
20423                 //find out which gen this pinned plug came from 
20424                 int frgn = object_gennum (plug);
20425                 if ((frgn != (int)max_generation) && settings.promotion)
20426                 {
20427                     int togn = object_gennum_plan (plug);
20428                     generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
20429                     if (frgn < togn)
20430                     {
20431                         generation_pinned_allocation_compact_size (generation_of (togn)) += len;
20432                     }
20433                 }
20434
20435                 dprintf (2, ("skipping gap %d, pin %Ix (%Id)", 
20436                     pinned_len (pinned_plug_of (entry)), plug, len));
20437             }
20438         }
20439         dprintf (2, ("ad_p_d: PL: %Id, SL: %Id, pfr: %d, psr: %d", 
20440             gen1_pins_left, total_space_to_skip, (int)(pin_frag_ratio*100), (int)(pin_surv_ratio*100)));
20441     }
20442 }
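// A hypothetical illustration of the thresholds above: 3MB of leftover gen1 pins over 10MB of space
// to skip gives pin_frag_ratio == 0.3 (> 0.15), and with 8MB of gen1 survivors pin_surv_ratio ==
// 0.375 (> 0.30), so the pins queued below the original gen0 start are dequeued and skipped here
// rather than being left for demotion.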
20443
20444 void gc_heap::process_ephemeral_boundaries (uint8_t* x,
20445                                             int& active_new_gen_number,
20446                                             int& active_old_gen_number,
20447                                             generation*& consing_gen,
20448                                             BOOL& allocate_in_condemned)
20449 {
20450 retry:
20451     if ((active_old_gen_number > 0) &&
20452         (x >= generation_allocation_start (generation_of (active_old_gen_number - 1))))
20453     {
20454         dprintf (1, ("crossing gen%d, x is %Ix", active_old_gen_number - 1, x));
20455
20456         if (!pinned_plug_que_empty_p())
20457         {
20458             dprintf (1, ("oldest pin: %Ix(%Id)",
20459                 pinned_plug (oldest_pin()), 
20460                 (x - pinned_plug (oldest_pin()))));
20461         }
20462
20463         if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation))
20464         {
20465             active_new_gen_number--;
20466         }
20467
20468         active_old_gen_number--;
20469         assert ((!settings.promotion) || (active_new_gen_number>0));
20470
20471         if (active_new_gen_number == (max_generation - 1))
20472         {
20473 #ifdef FREE_USAGE_STATS
20474             if (settings.condemned_generation == max_generation)
20475             {
20476                 // We need to do this before we skip the rest of the pinned plugs.
20477                 generation* gen_2 = generation_of (max_generation);
20478                 generation* gen_1 = generation_of (max_generation - 1);
20479
20480                 size_t total_num_pinned_free_spaces_left = 0;
20481
20482                 // We are about to allocate gen1; check how efficiently we fit into gen2 pinned free spaces.
20483                 for (int j = 0; j < NUM_GEN_POWER2; j++)
20484                 {
20485                     dprintf (1, ("[h%d][#%Id]2^%d: current: %Id, S: 2: %Id, 1: %Id(%Id)", 
20486                         heap_number, 
20487                         settings.gc_index,
20488                         (j + 10), 
20489                         gen_2->gen_current_pinned_free_spaces[j],
20490                         gen_2->gen_plugs[j], gen_1->gen_plugs[j],
20491                         (gen_2->gen_plugs[j] + gen_1->gen_plugs[j])));
20492
20493                     total_num_pinned_free_spaces_left += gen_2->gen_current_pinned_free_spaces[j];
20494                 }
20495
20496                 float pinned_free_list_efficiency = 0;
20497                 size_t total_pinned_free_space = generation_allocated_in_pinned_free (gen_2) + generation_pinned_free_obj_space (gen_2);
20498                 if (total_pinned_free_space != 0)
20499                 {
20500                     pinned_free_list_efficiency = (float)(generation_allocated_in_pinned_free (gen_2)) / (float)total_pinned_free_space;
20501                 }
20502
20503                 dprintf (1, ("[h%d] gen2 allocated %Id bytes with %Id bytes pinned free spaces (effi: %d%%), %Id (%Id) left",
20504                             heap_number,
20505                             generation_allocated_in_pinned_free (gen_2),
20506                             total_pinned_free_space, 
20507                             (int)(pinned_free_list_efficiency * 100),
20508                             generation_pinned_free_obj_space (gen_2),
20509                             total_num_pinned_free_spaces_left));
20510             }
20511 #endif //FREE_USAGE_STATS
20512
20513             //Go past all of the pinned plugs for this generation.
20514             while (!pinned_plug_que_empty_p() &&
20515                    (!in_range_for_segment ((pinned_plug (oldest_pin())), ephemeral_heap_segment)))
20516             {
20517                 size_t  entry = deque_pinned_plug();
20518                 mark*  m = pinned_plug_of (entry);
20519                 uint8_t*  plug = pinned_plug (m);
20520                 size_t  len = pinned_len (m);
20521                 // detect pinned blocks in a different (later) segment than the
20522                 // allocation segment and skip those until the oldest pin is in the ephemeral seg.
20523                 // adjust the allocation segment along the way (at the end it will
20524                 // be the ephemeral segment).
20525                 heap_segment* nseg = heap_segment_in_range (generation_allocation_segment (consing_gen));
20526
20527                 PREFIX_ASSUME(nseg != NULL);
20528
20529                 while (!((plug >= generation_allocation_pointer (consing_gen))&&
20530                         (plug < heap_segment_allocated (nseg))))
20531                 {
20532                     //adjust the end of the segment to be the end of the plug
20533                     assert (generation_allocation_pointer (consing_gen)>=
20534                             heap_segment_mem (nseg));
20535                     assert (generation_allocation_pointer (consing_gen)<=
20536                             heap_segment_committed (nseg));
20537
20538                     heap_segment_plan_allocated (nseg) =
20539                         generation_allocation_pointer (consing_gen);
20540                     //switch allocation segment
20541                     nseg = heap_segment_next_rw (nseg);
20542                     generation_allocation_segment (consing_gen) = nseg;
20543                     //reset the allocation pointer and limits
20544                     generation_allocation_pointer (consing_gen) =
20545                         heap_segment_mem (nseg);
20546                 }
20547                 set_new_pin_info (m, generation_allocation_pointer (consing_gen));
20548                 assert(pinned_len(m) == 0 || pinned_len(m) >= Align(min_obj_size));
20549                 generation_allocation_pointer (consing_gen) = plug + len;
20550                 generation_allocation_limit (consing_gen) =
20551                     generation_allocation_pointer (consing_gen);
20552             }
20553             allocate_in_condemned = TRUE;
20554             consing_gen = ensure_ephemeral_heap_segment (consing_gen);
20555         }
20556
20557         if (active_new_gen_number != max_generation)
20558         {
20559             if (active_new_gen_number == (max_generation - 1))
20560             {
20561                 maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
20562                 if (!demote_gen1_p)
20563                     advance_pins_for_demotion (consing_gen);
20564             }
20565
20566             plan_generation_start (generation_of (active_new_gen_number), consing_gen, x);
20567                 
20568             dprintf (1, ("process eph: allocated gen%d start at %Ix", 
20569                 active_new_gen_number,
20570                 generation_plan_allocation_start (generation_of (active_new_gen_number))));
20571
20572             if ((demotion_low == MAX_PTR) && !pinned_plug_que_empty_p())
20573             {
20574                 uint8_t* pplug = pinned_plug (oldest_pin());
20575                 if (object_gennum (pplug) > 0)
20576                 {
20577                     demotion_low = pplug;
20578                     dprintf (3, ("process eph: dlow->%Ix", demotion_low));
20579                 }
20580             }
20581
20582             assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
20583         }
20584
20585         goto retry;
20586     }
20587 }
20588
20589 inline
20590 void gc_heap::seg_clear_mark_bits (heap_segment* seg)
20591 {
20592     uint8_t* o = heap_segment_mem (seg);
20593     while (o < heap_segment_allocated (seg))
20594     {
20595         if (marked (o))
20596         {
20597             clear_marked (o);
20598         }
20599         o = o + Align (size (o));
20600     }
20601 }
20602
20603 #ifdef FEATURE_BASICFREEZE
20604 void gc_heap::sweep_ro_segments (heap_segment* start_seg)
20605 {
20606     //go through all of the segments in range and reset the mark bits
20607     //TODO: works only on small object segments
20608
20609     heap_segment* seg = start_seg;
20610
20611     while (seg)
20612     {
20613         if (heap_segment_read_only_p (seg) &&
20614             heap_segment_in_range_p (seg))
20615         {
20616 #ifdef BACKGROUND_GC
20617             if (settings.concurrent)
20618             {
20619                 seg_clear_mark_array_bits_soh (seg);
20620             }
20621             else
20622             {
20623                 seg_clear_mark_bits (seg);
20624             }
20625 #else //BACKGROUND_GC
20626
20627 #ifdef MARK_ARRAY
20628             if(gc_can_use_concurrent)
20629             {
20630                 clear_mark_array (max (heap_segment_mem (seg), lowest_address),
20631                               min (heap_segment_allocated (seg), highest_address),
20632                               FALSE); // read_only segments need the mark clear
20633             }
20634 #else //MARK_ARRAY
20635             seg_clear_mark_bits (seg);
20636 #endif //MARK_ARRAY
20637
20638 #endif //BACKGROUND_GC
20639         }
20640         seg = heap_segment_next (seg);
20641     }
20642 }
20643 #endif // FEATURE_BASICFREEZE
20644
20645 #ifdef FEATURE_LOH_COMPACTION
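// The LOH pin queue used below is a plain array of mark entries:
// loh_pinned_queue_tos is the enqueue index, loh_pinned_queue_bos the dequeue
// index, so the queue is empty when the two are equal and pins are always
// consumed in the order they were enqueued. A rough consumer-side sketch, with
// the names as declared in this file:
//
//     while (!loh_pinned_plug_que_empty_p())
//     {
//         mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
//         // ... use pinned_plug (m) / pinned_len (m) ...
//     }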
20646 inline
20647 BOOL gc_heap::loh_pinned_plug_que_empty_p()
20648 {
20649     return (loh_pinned_queue_bos == loh_pinned_queue_tos);
20650 }
20651
20652 void gc_heap::loh_set_allocator_next_pin()
20653 {
20654     if (!(loh_pinned_plug_que_empty_p()))
20655     {
20656         mark*  oldest_entry = loh_oldest_pin();
20657         uint8_t* plug = pinned_plug (oldest_entry);
20658         generation* gen = large_object_generation;
20659         if ((plug >= generation_allocation_pointer (gen)) &&
20660             (plug <  generation_allocation_limit (gen)))
20661         {
20662             generation_allocation_limit (gen) = pinned_plug (oldest_entry);
20663         }
20664         else
20665             assert (!((plug < generation_allocation_pointer (gen)) &&
20666                       (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
20667     }
20668 }
20669
20670 size_t gc_heap::loh_deque_pinned_plug ()
20671 {
20672     size_t m = loh_pinned_queue_bos;
20673     loh_pinned_queue_bos++;
20674     return m;
20675 }
20676
20677 inline
20678 mark* gc_heap::loh_pinned_plug_of (size_t bos)
20679 {
20680     return &loh_pinned_queue[bos];
20681 }
20682
20683 inline
20684 mark* gc_heap::loh_oldest_pin()
20685 {
20686     return loh_pinned_plug_of (loh_pinned_queue_bos);
20687 }
20688
20689 // If we can't grow the queue, then don't compact.
20690 BOOL gc_heap::loh_enque_pinned_plug (uint8_t* plug, size_t len)
20691 {
20692     assert(len >= Align(min_obj_size, get_alignment_constant (FALSE)));
20693
20694     if (loh_pinned_queue_length <= loh_pinned_queue_tos)
20695     {
20696         if (!grow_mark_stack (loh_pinned_queue, loh_pinned_queue_length, LOH_PIN_QUEUE_LENGTH))
20697         {
20698             return FALSE;
20699         }
20700     }
20701     dprintf (3, (" P: %Ix(%Id)", plug, len));
20702     mark& m = loh_pinned_queue[loh_pinned_queue_tos];
20703     m.first = plug;
20704     m.len = len;
20705     loh_pinned_queue_tos++;
20706     loh_set_allocator_next_pin();
20707     return TRUE;
20708 }
20709
20710 inline
20711 BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit)
20712 {
20713     dprintf (1235, ("trying to fit %Id(%Id) between %Ix and %Ix (%Id)", 
20714         size, 
20715         (2* AlignQword (loh_padding_obj_size) +  size),
20716         alloc_pointer,
20717         alloc_limit,
20718         (alloc_limit - alloc_pointer)));
20719
20720     return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) +  size) <= alloc_limit);
20721 }
20722
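// Plans a (compacted) destination for a non-pinned large object of the given
// size. Roughly: while the object does not fit between the allocation pointer
// and limit, either consume the oldest pinned plug (recording the gap in front
// of it in pinned_len), push the limit out to plan_allocated / committed, grow
// the segment, or move to the next segment, then retry. Every planned
// allocation reserves an extra AlignQword (loh_padding_obj_size) in front of
// the returned address; compact_loh later turns that padding into a free
// object gap.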
20723 uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
20724 {
20725     UNREFERENCED_PARAMETER(old_loc);
20726
20727     generation* gen = large_object_generation;
20728     dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id", 
20729         generation_allocation_pointer (gen),
20730         generation_allocation_limit (gen),
20731         size));
20732
20733 retry:
20734     {
20735         heap_segment* seg = generation_allocation_segment (gen);
20736         if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen))))
20737         {
20738             if ((!(loh_pinned_plug_que_empty_p()) &&
20739                  (generation_allocation_limit (gen) ==
20740                   pinned_plug (loh_oldest_pin()))))
20741             {
20742                 mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
20743                 size_t len = pinned_len (m);
20744                 uint8_t* plug = pinned_plug (m);
20745                 dprintf (1235, ("AIC: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
20746                 pinned_len (m) = plug - generation_allocation_pointer (gen);
20747                 generation_allocation_pointer (gen) = plug + len;
20748                 
20749                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20750                 loh_set_allocator_next_pin();
20751                 dprintf (1235, ("s: p: %Ix, l: %Ix (%Id)", 
20752                     generation_allocation_pointer (gen), 
20753                     generation_allocation_limit (gen),
20754                     (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
20755
20756                 goto retry;
20757             }
20758
20759             if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
20760             {
20761                 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20762                 dprintf (1235, ("l->pa(%Ix)", generation_allocation_limit (gen)));
20763             }
20764             else
20765             {
20766                 if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
20767                 {
20768                     heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
20769                     generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20770                     dprintf (1235, ("l->c(%Ix)", generation_allocation_limit (gen)));
20771                 }
20772                 else
20773                 {
20774                     if (loh_size_fit_p (size, generation_allocation_pointer (gen), heap_segment_reserved (seg)) &&
20775                         (grow_heap_segment (seg, (generation_allocation_pointer (gen) + size + 2* AlignQword (loh_padding_obj_size)))))
20776                     {
20777                         dprintf (1235, ("growing seg from %Ix to %Ix\n", heap_segment_committed (seg),
20778                                          (generation_allocation_pointer (gen) + size)));
20779
20780                         heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
20781                         generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20782
20783                         dprintf (1235, ("g: p: %Ix, l: %Ix (%Id)", 
20784                             generation_allocation_pointer (gen), 
20785                             generation_allocation_limit (gen),
20786                             (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
20787                     }
20788                     else
20789                     {
20790                         heap_segment* next_seg = heap_segment_next (seg);
20791                         assert (generation_allocation_pointer (gen)>=
20792                                 heap_segment_mem (seg));
20793                         // Verify that all pinned plugs for this segment are consumed
20794                         if (!loh_pinned_plug_que_empty_p() &&
20795                             ((pinned_plug (loh_oldest_pin()) <
20796                               heap_segment_allocated (seg)) &&
20797                              (pinned_plug (loh_oldest_pin()) >=
20798                               generation_allocation_pointer (gen))))
20799                         {
20800                             LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
20801                                          pinned_plug (loh_oldest_pin())));
20802                             dprintf (1236, ("queue empty: %d", loh_pinned_plug_que_empty_p()));
20803                             FATAL_GC_ERROR();
20804                         }
20805                         assert (generation_allocation_pointer (gen)>=
20806                                 heap_segment_mem (seg));
20807                         assert (generation_allocation_pointer (gen)<=
20808                                 heap_segment_committed (seg));
20809                         heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);
20810
20811                         if (next_seg)
20812                         {
20813                             // for LOH, do we want to try starting from the first LOH segment every time though?
20814                             generation_allocation_segment (gen) = next_seg;
20815                             generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
20816                             generation_allocation_limit (gen) = generation_allocation_pointer (gen);
20817
20818                             dprintf (1235, ("n: p: %Ix, l: %Ix (%Id)", 
20819                                 generation_allocation_pointer (gen), 
20820                                 generation_allocation_limit (gen),
20821                                 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
20822                         }
20823                         else
20824                         {
20825                             dprintf (1, ("We ran out of space compacting, shouldn't happen"));
20826                             FATAL_GC_ERROR();
20827                         }
20828                     }
20829                 }
20830             }
20831             loh_set_allocator_next_pin();
20832
20833             dprintf (1235, ("r: p: %Ix, l: %Ix (%Id)", 
20834                 generation_allocation_pointer (gen), 
20835                 generation_allocation_limit (gen),
20836                 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
20837
20838             goto retry;
20839         }
20840     }
20841
20842     {
20843         assert (generation_allocation_pointer (gen)>=
20844                 heap_segment_mem (generation_allocation_segment (gen)));
20845         uint8_t* result = generation_allocation_pointer (gen);
20846         size_t loh_pad = AlignQword (loh_padding_obj_size);
20847
20848         generation_allocation_pointer (gen) += size + loh_pad;
20849         assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
20850
20851         dprintf (1235, ("p: %Ix, l: %Ix (%Id)", 
20852             generation_allocation_pointer (gen), 
20853             generation_allocation_limit (gen),
20854             (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
20855
20856         assert (result + loh_pad);
20857         return result + loh_pad;
20858     }
20859 }
20860
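// LOH compaction is opt-in: it runs either because the config requests it for
// every GC (loh_compaction_always_p) or because the compaction mode was set to
// something other than the default, e.g. loh_compaction_once, which
// check_loh_compact_mode resets back to the default once all heaps have
// compacted.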
20861 BOOL gc_heap::should_compact_loh()
20862 {
20863     return (loh_compaction_always_p || (loh_compaction_mode != loh_compaction_default));
20864 }
20865
20866 inline
20867 void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p)
20868 {
20869     if (settings.loh_compaction && (loh_compaction_mode == loh_compaction_once))
20870     {
20871         if (all_heaps_compacted_p)
20872         {
20873             // If the compaction mode says to compact once and all heaps did compact LOH,
20874             // we need to revert it to the default (no compaction).
20875             loh_compaction_mode = loh_compaction_default;
20876         }
20877     }
20878 }
20879
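// LOH plan phase: resets every segment's plan_allocated to the segment start,
// then walks the marked large objects in address order. Pinned objects are
// enqueued on the LOH pin queue and keep their address; the rest get a
// destination from loh_allocate_in_condemned. The per-object relocation
// distance is recorded via loh_set_node_relocation_distance and replayed by
// relocate_in_loh_compact / compact_loh. Returns FALSE (i.e. don't compact LOH)
// if the pin queue cannot be allocated.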
20880 BOOL gc_heap::plan_loh()
20881 {
20882     if (!loh_pinned_queue)
20883     {
20884         loh_pinned_queue = new (nothrow) (mark [LOH_PIN_QUEUE_LENGTH]);
20885         if (!loh_pinned_queue)
20886         {
20887             dprintf (1, ("Cannot allocate the LOH pinned queue (%Id bytes), no compaction", 
20888                          LOH_PIN_QUEUE_LENGTH * sizeof (mark)));
20889             return FALSE;
20890         }
20891
20892         loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH;
20893     }
20894
20895     if (heap_number == 0)
20896         loh_pinned_queue_decay = LOH_PIN_DECAY;
20897
20898     loh_pinned_queue_tos = 0;
20899     loh_pinned_queue_bos = 0;
20900     
20901     generation* gen        = large_object_generation;
20902     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
20903     PREFIX_ASSUME(start_seg != NULL);
20904     heap_segment* seg      = start_seg;
20905     uint8_t* o             = generation_allocation_start (gen);
20906
20907     dprintf (1235, ("before GC LOH size: %Id, free list: %Id, free obj: %Id\n", 
20908         generation_size (max_generation + 1), 
20909         generation_free_list_space (gen),
20910         generation_free_obj_space (gen)));
20911
20912     while (seg)
20913     {
20914         heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
20915         seg = heap_segment_next (seg);
20916     }
20917
20918     seg = start_seg;
20919
20920     //Skip the generation gap object
20921     o = o + AlignQword (size (o));
20922     // We don't need to ever realloc gen3 start so don't touch it.
20923     heap_segment_plan_allocated (seg) = o;
20924     generation_allocation_pointer (gen) = o;
20925     generation_allocation_limit (gen) = generation_allocation_pointer (gen);
20926     generation_allocation_segment (gen) = start_seg;
20927
20928     uint8_t* free_space_start = o;
20929     uint8_t* free_space_end = o;
20930     uint8_t* new_address = 0;
20931
20932     while (1)
20933     {
20934         if (o >= heap_segment_allocated (seg))
20935         {
20936             seg = heap_segment_next (seg);
20937             if (seg == 0)
20938             {
20939                 break;
20940             }
20941
20942             o = heap_segment_mem (seg);
20943         }
20944
20945         if (marked (o))
20946         {
20947             free_space_end = o;
20948             size_t size = AlignQword (size (o));
20949             dprintf (1235, ("%Ix(%Id) M", o, size));
20950
20951             if (pinned (o))
20952             {
20953                 // We don't clear the pinned bit yet so we can check in 
20954                 // compact phase how big a free object we should allocate
20955                 // in front of the pinned object. We use the reloc address
20956                 // field to store this.
20957                 if (!loh_enque_pinned_plug (o, size))
20958                 {
20959                     return FALSE;
20960                 }
20961                 new_address = o;
20962             }
20963             else
20964             {
20965                 new_address = loh_allocate_in_condemned (o, size);
20966             }
20967
20968             loh_set_node_relocation_distance (o, (new_address - o));
20969             dprintf (1235, ("lobj %Ix-%Ix -> %Ix-%Ix (%Id)", o, (o + size), new_address, (new_address + size), (new_address - o)));
20970
20971             o = o + size;
20972             free_space_start = o;
20973             if (o < heap_segment_allocated (seg))
20974             {
20975                 assert (!marked (o));
20976             }
20977         }
20978         else
20979         {
20980             while (o < heap_segment_allocated (seg) && !marked (o))
20981             {
20982                 dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0)));
20983                 o = o + AlignQword (size (o));
20984             }
20985         }
20986     }
20987
20988     while (!loh_pinned_plug_que_empty_p())
20989     {
20990         mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
20991         size_t len = pinned_len (m);
20992         uint8_t* plug = pinned_plug (m);
20993
20994         // detect a pinned block in a different (later) segment than the
20995         // allocation segment
20996         heap_segment* nseg = heap_segment_rw (generation_allocation_segment (gen));
20997
20998         while ((plug < generation_allocation_pointer (gen)) ||
20999                (plug >= heap_segment_allocated (nseg)))
21000         {
21001             assert ((plug < heap_segment_mem (nseg)) ||
21002                     (plug > heap_segment_reserved (nseg)));
21003             //adjust the end of the segment to be the end of the plug
21004             assert (generation_allocation_pointer (gen)>=
21005                     heap_segment_mem (nseg));
21006             assert (generation_allocation_pointer (gen)<=
21007                     heap_segment_committed (nseg));
21008
21009             heap_segment_plan_allocated (nseg) =
21010                 generation_allocation_pointer (gen);
21011             //switch allocation segment
21012             nseg = heap_segment_next_rw (nseg);
21013             generation_allocation_segment (gen) = nseg;
21014             //reset the allocation pointer and limits
21015             generation_allocation_pointer (gen) =
21016                 heap_segment_mem (nseg);
21017         }
21018
21019         dprintf (1235, ("SP: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
21020         pinned_len (m) = plug - generation_allocation_pointer (gen);
21021         generation_allocation_pointer (gen) = plug + len;
21022     }
21023
21024     heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
21025     generation_allocation_pointer (gen) = 0;
21026     generation_allocation_limit (gen) = 0;
21027
21028     return TRUE;
21029 }
21030
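// LOH compact phase: walks the same objects in the same order as plan_loh.
// Pinned plugs stay in place and the gap recorded in pinned_len is threaded
// onto the free list in front of them; non-pinned objects are copied to their
// planned address with gcmemcopy, with a loh_padding_obj_size gap threaded in
// front. Segments that end up completely empty are unlinked and queued on
// freeable_large_heap_segment.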
21031 void gc_heap::compact_loh()
21032 {
21033     assert (should_compact_loh());
21034
21035     generation* gen        = large_object_generation;
21036     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
21037     PREFIX_ASSUME(start_seg != NULL);
21038     heap_segment* seg      = start_seg;
21039     heap_segment* prev_seg = 0;
21040     uint8_t* o             = generation_allocation_start (gen);
21041
21042     //Skip the generation gap object
21043     o = o + AlignQword (size (o));
21044     // We don't need to ever realloc gen3 start so don't touch it.
21045     uint8_t* free_space_start = o;
21046     uint8_t* free_space_end = o;
21047     generation_allocator (gen)->clear();
21048     generation_free_list_space (gen) = 0;
21049     generation_free_obj_space (gen) = 0;
21050
21051     loh_pinned_queue_bos = 0;
21052
21053     while (1)
21054     {
21055         if (o >= heap_segment_allocated (seg))
21056         {
21057             heap_segment* next_seg = heap_segment_next (seg);
21058
21059             if ((heap_segment_plan_allocated (seg) == heap_segment_mem (seg)) &&
21060                 (seg != start_seg) && !heap_segment_read_only_p (seg))
21061             {
21062                 dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
21063                 assert (prev_seg);
21064                 heap_segment_next (prev_seg) = next_seg;
21065                 heap_segment_next (seg) = freeable_large_heap_segment;
21066                 freeable_large_heap_segment = seg;
21067             }
21068             else
21069             {
21070                 if (!heap_segment_read_only_p (seg))
21071                 {
21072                     // We grew the segment to accommodate allocations.
21073                     if (heap_segment_plan_allocated (seg) > heap_segment_allocated (seg))
21074                     {
21075                         if ((heap_segment_plan_allocated (seg) - plug_skew)  > heap_segment_used (seg))
21076                         {
21077                             heap_segment_used (seg) = heap_segment_plan_allocated (seg) - plug_skew;
21078                         }
21079                     }
21080
21081                     heap_segment_allocated (seg) = heap_segment_plan_allocated (seg);
21082                     dprintf (3, ("Trimming seg to %Ix[", heap_segment_allocated (seg)));
21083                     decommit_heap_segment_pages (seg, 0);
21084                     dprintf (1236, ("CLOH: seg: %Ix, alloc: %Ix, used: %Ix, committed: %Ix",
21085                         seg, 
21086                         heap_segment_allocated (seg),
21087                         heap_segment_used (seg),
21088                         heap_segment_committed (seg)));
21089                     //heap_segment_used (seg) = heap_segment_allocated (seg) - plug_skew;
21090                     dprintf (1236, ("CLOH: used is set to %Ix", heap_segment_used (seg)));
21091                 }
21092                 prev_seg = seg;
21093             }
21094
21095             seg = next_seg;
21096             if (seg == 0)
21097                 break;
21098             else
21099             {
21100                 o = heap_segment_mem (seg);
21101             }
21102         }
21103
21104         if (marked (o))
21105         {
21106             free_space_end = o;
21107             size_t size = AlignQword (size (o));
21108
21109             size_t loh_pad;
21110             uint8_t* reloc = o;
21111             clear_marked (o);
21112
21113             if (pinned (o))
21114             {
21115                 // We are relying on the fact that pinned objects are always looked at in the same order
21116                 // in the plan phase and in the compact phase.
21117                 mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
21118                 uint8_t* plug = pinned_plug (m);
21119                 assert (plug == o);
21120
21121                 loh_pad = pinned_len (m);
21122                 clear_pinned (o);
21123             }
21124             else
21125             {
21126                 loh_pad = AlignQword (loh_padding_obj_size);
21127
21128                 reloc += loh_node_relocation_distance (o);
21129                 gcmemcopy (reloc, o, size, TRUE);
21130             }
21131
21132             thread_gap ((reloc - loh_pad), loh_pad, gen);
21133
21134             o = o + size;
21135             free_space_start = o;
21136             if (o < heap_segment_allocated (seg))
21137             {
21138                 assert (!marked (o));
21139             }
21140         }
21141         else
21142         {
21143             while (o < heap_segment_allocated (seg) && !marked (o))
21144             {
21145                 o = o + AlignQword (size (o));
21146             }
21147         }
21148     }
21149
21150     assert (loh_pinned_plug_que_empty_p());
21151
21152     dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", 
21153         generation_size (max_generation + 1), 
21154         generation_free_list_space (gen),
21155         generation_free_obj_space (gen)));
21156 }
21157
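// LOH relocate phase: walks the surviving (marked) large objects and updates
// every object reference they contain via reloc_survivor_helper so that the
// references point at the planned, post-compaction locations.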
21158 void gc_heap::relocate_in_loh_compact()
21159 {
21160     generation* gen        = large_object_generation;
21161     heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
21162     uint8_t* o             = generation_allocation_start (gen);
21163
21164     //Skip the generation gap object
21165     o = o + AlignQword (size (o));
21166
21167     relocate_args args;
21168     args.low = gc_low;
21169     args.high = gc_high;
21170     args.last_plug = 0;
21171
21172     while (1)
21173     {
21174         if (o >= heap_segment_allocated (seg))
21175         {
21176             seg = heap_segment_next (seg);
21177             if (seg == 0)
21178             {
21179                 break;
21180             }
21181
21182             o = heap_segment_mem (seg);
21183         }
21184
21185         if (marked (o))
21186         {
21187             size_t size = AlignQword (size (o));
21188
21189             check_class_object_demotion (o);
21190             if (contain_pointers (o))
21191             {
21192                 go_through_object_nostart (method_table (o), o, size(o), pval,
21193                 {
21194                     reloc_survivor_helper (pval);
21195                 });
21196             }
21197
21198             o = o + size;
21199             if (o < heap_segment_allocated (seg))
21200             {
21201                 assert (!marked (o));
21202             }
21203         }
21204         else
21205         {
21206             while (o < heap_segment_allocated (seg) && !marked (o))
21207             {
21208                 o = o + AlignQword (size (o));
21209             }
21210         }
21211     }
21212
21213     dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n", 
21214         generation_size (max_generation + 1), 
21215         generation_free_list_space (gen),
21216         generation_free_obj_space (gen)));
21217 }
21218
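// Diagnostics walk over the surviving large objects: reports each object's old
// range, new range and relocation distance through the supplied record_surv_fn
// callback (profiler / ETW compaction notifications).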
21219 void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
21220 {
21221     generation* gen        = large_object_generation;
21222     heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
21223     uint8_t* o             = generation_allocation_start (gen);
21224
21225     //Skip the generation gap object
21226     o = o + AlignQword (size (o));
21227
21228     while (1)
21229     {
21230         if (o >= heap_segment_allocated (seg))
21231         {
21232             seg = heap_segment_next (seg);
21233             if (seg == 0)
21234             {
21235                 break;
21236             }
21237
21238             o = heap_segment_mem (seg);
21239         }
21240
21241         if (marked (o))
21242         {
21243             size_t size = AlignQword (size (o));
21244
21245             ptrdiff_t reloc = loh_node_relocation_distance (o);
21246
21247             STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
21248
21249             fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);
21250
21251             o = o + size;
21252             if (o < heap_segment_allocated (seg))
21253             {
21254                 assert (!marked (o));
21255             }
21256         }
21257         else
21258         {
21259             while (o < heap_segment_allocated (seg) && !marked (o))
21260             {
21261                 o = o + AlignQword (size (o));
21262             }
21263         }
21264     }
21265 }
21266
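// Heuristic test for "is this a large object": in effect, brick entries are
// only populated for small object segments, so a zero entry for the object's
// brick is taken to mean the object lives on the LOH.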
21267 BOOL gc_heap::loh_object_p (uint8_t* o)
21268 {
21269 #ifdef MULTIPLE_HEAPS
21270     gc_heap* hp = gc_heap::g_heaps [0];
21271     int brick_entry = hp->brick_table[hp->brick_of (o)];
21272 #else //MULTIPLE_HEAPS
21273     int brick_entry = brick_table[brick_of (o)];
21274 #endif //MULTIPLE_HEAPS
21275
21276     return (brick_entry == 0);
21277 }
21278 #endif //FEATURE_LOH_COMPACTION
21279
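// Marks the current plug as artificially pinned. plan_phase uses this, for
// example, for a large non-pinned plug whose relocation distance would be tiny
// compared to its size (see the (ps > 8*OS_PAGE_SIZE) && (reloc < ps/16) check
// below), since copying such a plug frees almost no space; the plug's whole
// size is accounted as artificial_pinned_size.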
21280 void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p, 
21281                                       BOOL& last_pinned_plug_p, 
21282                                       BOOL& pinned_plug_p,
21283                                       size_t ps,
21284                                       size_t& artificial_pinned_size)
21285 {
21286     last_npinned_plug_p = FALSE;
21287     last_pinned_plug_p = TRUE;
21288     pinned_plug_p = TRUE;
21289     artificial_pinned_size = ps;
21290 }
21291
21292 // Because we have artificial pinning, we can't guarantee that pinned and npinned
21293 // plugs are always interleaved.
21294 void gc_heap::store_plug_gap_info (uint8_t* plug_start,
21295                                    uint8_t* plug_end,
21296                                    BOOL& last_npinned_plug_p, 
21297                                    BOOL& last_pinned_plug_p, 
21298                                    uint8_t*& last_pinned_plug,
21299                                    BOOL& pinned_plug_p,
21300                                    uint8_t* last_object_in_last_plug,
21301                                    BOOL& merge_with_last_pin_p,
21302                                    // this is only for verification purposes
21303                                    size_t last_plug_len)
21304 {
21305     UNREFERENCED_PARAMETER(last_plug_len);
21306
21307     if (!last_npinned_plug_p && !last_pinned_plug_p)
21308     {
21309         //dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start));
21310         dprintf (3, ("Free: %Ix", (plug_start - plug_end)));
21311         assert ((plug_start == plug_end) || ((size_t)(plug_start - plug_end) >= Align (min_obj_size)));
21312         set_gap_size (plug_start, plug_start - plug_end);
21313     }
21314
21315     if (pinned (plug_start))
21316     {
21317         BOOL save_pre_plug_info_p = FALSE;
21318
21319         if (last_npinned_plug_p || last_pinned_plug_p)
21320         {
21321             //if (last_plug_len == Align (min_obj_size))
21322             //{
21323             //    dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct"));
21324             //    GCToOSInterface::DebugBreak();
21325             //}
21326             save_pre_plug_info_p = TRUE;
21327         }
21328
21329         pinned_plug_p = TRUE;
21330         last_npinned_plug_p = FALSE;
21331
21332         if (last_pinned_plug_p)
21333         {
21334             dprintf (3, ("last plug %Ix was also pinned, should merge", last_pinned_plug));
21335             merge_with_last_pin_p = TRUE;
21336         }
21337         else
21338         {
21339             last_pinned_plug_p = TRUE;
21340             last_pinned_plug = plug_start;
21341                 
21342             enque_pinned_plug (last_pinned_plug, save_pre_plug_info_p, last_object_in_last_plug);
21343
21344             if (save_pre_plug_info_p)
21345             {
21346                 set_gap_size (plug_start, sizeof (gap_reloc_pair));
21347             }
21348         }
21349     }
21350     else
21351     {
21352         if (last_pinned_plug_p)
21353         {
21354             //if (Align (last_plug_len) < min_pre_pin_obj_size)
21355             //{
21356             //    dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct"));
21357             //    GCToOSInterface::DebugBreak();
21358             //}
21359
21360             save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start);
21361             set_gap_size (plug_start, sizeof (gap_reloc_pair));
21362
21363             verify_pins_with_post_plug_info("after saving post plug info");
21364         }
21365         last_npinned_plug_p = TRUE;
21366         last_pinned_plug_p = FALSE;
21367     }
21368 }
21369
21370 void gc_heap::record_interesting_data_point (interesting_data_point idp)
21371 {
21372 #ifdef GC_CONFIG_DRIVEN
21373     (interesting_data_per_gc[idp])++;
21374 #else
21375     UNREFERENCED_PARAMETER(idp);
21376 #endif //GC_CONFIG_DRIVEN
21377 }
21378
21379 #ifdef _PREFAST_
21380 #pragma warning(push)
21381 #pragma warning(disable:21000) // Suppress PREFast warning about overly large function
21382 #endif //_PREFAST_
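// plan_phase walks the live objects of the condemned generations, coalescing
// adjacent marked objects into plugs, queuing pinned plugs, and computing a
// destination for each non-pinned plug (allocate_in_older_generation when
// promoting into an older generation, allocate_in_condemned_generations
// otherwise). Plug trees are recorded in the brick table so the relocate and
// compact phases can find them; the actual compact-vs-sweep decision is made
// later in this function from the data gathered here.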
21383 void gc_heap::plan_phase (int condemned_gen_number)
21384 {
21385     size_t old_gen2_allocated = 0;
21386     size_t old_gen2_size = 0;
21387
21388     if (condemned_gen_number == (max_generation - 1))
21389     {
21390         old_gen2_allocated = generation_free_list_allocated (generation_of (max_generation));
21391         old_gen2_size = generation_size (max_generation);
21392     }
21393
21394     assert (settings.concurrent == FALSE);
21395
21396     // %type%  category = quote (plan);
21397 #ifdef TIME_GC
21398     unsigned start;
21399     unsigned finish;
21400     start = GetCycleCount32();
21401 #endif //TIME_GC
21402
21403     dprintf (2,("---- Plan Phase ---- Condemned generation %d, promotion: %d",
21404                 condemned_gen_number, settings.promotion ? 1 : 0));
21405
21406     generation*  condemned_gen1 = generation_of (condemned_gen_number);
21407
21408 #ifdef MARK_LIST
21409     BOOL use_mark_list = FALSE;
21410     uint8_t** mark_list_next = &mark_list[0];
21411 #ifdef GC_CONFIG_DRIVEN
21412     dprintf (3, ("total number of marked objects: %Id (%Id)",
21413                  (mark_list_index - &mark_list[0]), ((mark_list_end - &mark_list[0]))));
21414 #else
21415     dprintf (3, ("mark_list length: %Id",
21416                  (mark_list_index - &mark_list[0])));
21417 #endif //GC_CONFIG_DRIVEN
21418
21419     if ((condemned_gen_number < max_generation) &&
21420         (mark_list_index <= mark_list_end) 
21421 #ifdef BACKGROUND_GC        
21422         && (!recursive_gc_sync::background_running_p())
21423 #endif //BACKGROUND_GC
21424         )
21425     {
21426 #ifndef MULTIPLE_HEAPS
21427         _sort (&mark_list[0], mark_list_index-1, 0);
21428         //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0)));
21429         //verify_qsort_array (&mark_list[0], mark_list_index-1);
21430 #endif //!MULTIPLE_HEAPS
21431         use_mark_list = TRUE;
21432         get_gc_data_per_heap()->set_mechanism_bit (gc_mark_list_bit);
21433     }
21434     else
21435     {
21436         dprintf (3, ("mark_list not used"));
21437     }
21438
21439 #endif //MARK_LIST
21440
21441 #ifdef FEATURE_BASICFREEZE
21442     if ((generation_start_segment (condemned_gen1) != ephemeral_heap_segment) &&
21443         ro_segments_in_range)
21444     {
21445         sweep_ro_segments (generation_start_segment (condemned_gen1));
21446     }
21447 #endif // FEATURE_BASICFREEZE
21448
21449 #ifndef MULTIPLE_HEAPS
21450     if (shigh != (uint8_t*)0)
21451     {
21452         heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));
21453
21454         PREFIX_ASSUME(seg != NULL);
21455
21456         heap_segment* fseg = seg;
21457         do
21458         {
21459             if (slow > heap_segment_mem (seg) &&
21460                 slow < heap_segment_reserved (seg))
21461             {
21462                 if (seg == fseg)
21463                 {
21464                     uint8_t* o = generation_allocation_start (condemned_gen1) +
21465                         Align (size (generation_allocation_start (condemned_gen1)));
21466                     if (slow > o)
21467                     {
21468                         assert ((slow - o) >= (int)Align (min_obj_size));
21469 #ifdef BACKGROUND_GC
21470                         if (current_c_gc_state == c_gc_state_marking)
21471                         {
21472                             bgc_clear_batch_mark_array_bits (o, slow);
21473                         }
21474 #endif //BACKGROUND_GC
21475                         make_unused_array (o, slow - o);
21476                     }
21477                 } 
21478                 else
21479                 {
21480                     assert (condemned_gen_number == max_generation);
21481                     make_unused_array (heap_segment_mem (seg),
21482                                        slow - heap_segment_mem (seg));
21483                 }
21484             }
21485             if (in_range_for_segment (shigh, seg))
21486             {
21487 #ifdef BACKGROUND_GC
21488                 if (current_c_gc_state == c_gc_state_marking)
21489                 {
21490                     bgc_clear_batch_mark_array_bits ((shigh + Align (size (shigh))), heap_segment_allocated (seg));
21491                 }
21492 #endif //BACKGROUND_GC
21493                 heap_segment_allocated (seg) = shigh + Align (size (shigh));
21494             }
21495             // test if the segment is in the range of [slow, shigh]
21496             if (!((heap_segment_reserved (seg) >= slow) &&
21497                   (heap_segment_mem (seg) <= shigh)))
21498             {
21499                 // shorten it to minimum
21500                 heap_segment_allocated (seg) =  heap_segment_mem (seg);
21501             }
21502             seg = heap_segment_next_rw (seg);
21503         } while (seg);
21504     }
21505     else
21506     {
21507         heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));
21508
21509         PREFIX_ASSUME(seg != NULL);
21510
21511         heap_segment* sseg = seg;
21512         do
21513         {
21514             // shorten it to minimum
21515             if (seg == sseg)
21516             {
21517                 // no survivors; make all generations look empty
21518                 uint8_t* o = generation_allocation_start (condemned_gen1) +
21519                     Align (size (generation_allocation_start (condemned_gen1)));
21520 #ifdef BACKGROUND_GC
21521                 if (current_c_gc_state == c_gc_state_marking)
21522                 {
21523                     bgc_clear_batch_mark_array_bits (o, heap_segment_allocated (seg));
21524                 }
21525 #endif //BACKGROUND_GC
21526                 heap_segment_allocated (seg) = o;
21527             }
21528             else
21529             {
21530                 assert (condemned_gen_number == max_generation);
21531 #ifdef BACKGROUND_GC
21532                 if (current_c_gc_state == c_gc_state_marking)
21533                 {
21534                     bgc_clear_batch_mark_array_bits (heap_segment_mem (seg), heap_segment_allocated (seg));
21535                 }
21536 #endif //BACKGROUND_GC
21537                 heap_segment_allocated (seg) =  heap_segment_mem (seg);
21538             }
21539             seg = heap_segment_next_rw (seg);
21540         } while (seg);
21541     }
21542
21543 #endif //MULTIPLE_HEAPS
21544
21545     heap_segment*  seg1 = heap_segment_rw (generation_start_segment (condemned_gen1));
21546
21547     PREFIX_ASSUME(seg1 != NULL);
21548
21549     uint8_t*  end = heap_segment_allocated (seg1);
21550     uint8_t*  first_condemned_address = generation_allocation_start (condemned_gen1);
21551     uint8_t*  x = first_condemned_address;
21552
21553     assert (!marked (x));
21554     uint8_t*  plug_end = x;
21555     uint8_t*  tree = 0;
21556     size_t  sequence_number = 0;
21557     uint8_t*  last_node = 0;
21558     size_t  current_brick = brick_of (x);
21559     BOOL  allocate_in_condemned = ((condemned_gen_number == max_generation)||
21560                                    (settings.promotion == FALSE));
21561     int  active_old_gen_number = condemned_gen_number;
21562     int  active_new_gen_number = (allocate_in_condemned ? condemned_gen_number:
21563                                   (1 + condemned_gen_number));
21564     generation*  older_gen = 0;
21565     generation* consing_gen = condemned_gen1;
21566     alloc_list  r_free_list [MAX_BUCKET_COUNT];
21567
21568     size_t r_free_list_space = 0;
21569     size_t r_free_obj_space = 0;
21570     size_t r_older_gen_free_list_allocated = 0;
21571     size_t r_older_gen_condemned_allocated = 0;
21572     size_t r_older_gen_end_seg_allocated = 0;
21573     uint8_t*  r_allocation_pointer = 0;
21574     uint8_t*  r_allocation_limit = 0;
21575     uint8_t* r_allocation_start_region = 0;
21576     heap_segment*  r_allocation_segment = 0;
21577 #ifdef FREE_USAGE_STATS
21578     size_t r_older_gen_free_space[NUM_GEN_POWER2];
21579 #endif //FREE_USAGE_STATS
21580
21581     if ((condemned_gen_number < max_generation))
21582     {
21583         older_gen = generation_of (min (max_generation, 1 + condemned_gen_number));
21584         generation_allocator (older_gen)->copy_to_alloc_list (r_free_list);
21585
21586         r_free_list_space = generation_free_list_space (older_gen);
21587         r_free_obj_space = generation_free_obj_space (older_gen);
21588 #ifdef FREE_USAGE_STATS
21589         memcpy (r_older_gen_free_space, older_gen->gen_free_spaces, sizeof (r_older_gen_free_space));
21590 #endif //FREE_USAGE_STATS
21591         generation_allocate_end_seg_p (older_gen) = FALSE;
21592         r_older_gen_free_list_allocated = generation_free_list_allocated (older_gen);
21593         r_older_gen_condemned_allocated = generation_condemned_allocated (older_gen);
21594         r_older_gen_end_seg_allocated = generation_end_seg_allocated (older_gen);
21595         r_allocation_limit = generation_allocation_limit (older_gen);
21596         r_allocation_pointer = generation_allocation_pointer (older_gen);
21597         r_allocation_start_region = generation_allocation_context_start_region (older_gen);
21598         r_allocation_segment = generation_allocation_segment (older_gen);
21599         heap_segment* start_seg = heap_segment_rw (generation_start_segment (older_gen));
21600
21601         PREFIX_ASSUME(start_seg != NULL);
21602
21603         if (start_seg != ephemeral_heap_segment)
21604         {
21605             assert (condemned_gen_number == (max_generation - 1));
21606             while (start_seg && (start_seg != ephemeral_heap_segment))
21607             {
21608                 assert (heap_segment_allocated (start_seg) >=
21609                         heap_segment_mem (start_seg));
21610                 assert (heap_segment_allocated (start_seg) <=
21611                         heap_segment_reserved (start_seg));
21612                 heap_segment_plan_allocated (start_seg) =
21613                     heap_segment_allocated (start_seg);
21614                 start_seg = heap_segment_next_rw (start_seg);
21615             }
21616         }
21617     }
21618
21619     //reset all of the segment allocated sizes
21620     {
21621         heap_segment*  seg2 = heap_segment_rw (generation_start_segment (condemned_gen1));
21622
21623         PREFIX_ASSUME(seg2 != NULL);
21624
21625         while (seg2)
21626         {
21627             heap_segment_plan_allocated (seg2) =
21628                 heap_segment_mem (seg2);
21629             seg2 = heap_segment_next_rw (seg2);
21630         }
21631     }
21632     int  condemned_gn = condemned_gen_number;
21633
21634     int bottom_gen = 0;
21635     init_free_and_plug();
21636
21637     while (condemned_gn >= bottom_gen)
21638     {
21639         generation*  condemned_gen2 = generation_of (condemned_gn);
21640         generation_allocator (condemned_gen2)->clear();
21641         generation_free_list_space (condemned_gen2) = 0;
21642         generation_free_obj_space (condemned_gen2) = 0;
21643         generation_allocation_size (condemned_gen2) = 0;
21644         generation_condemned_allocated (condemned_gen2) = 0; 
21645         generation_pinned_allocated (condemned_gen2) = 0; 
21646         generation_free_list_allocated(condemned_gen2) = 0; 
21647         generation_end_seg_allocated (condemned_gen2) = 0; 
21648         generation_pinned_allocation_sweep_size (condemned_gen2) = 0;
21649         generation_pinned_allocation_compact_size (condemned_gen2) = 0;
21650 #ifdef FREE_USAGE_STATS
21651         generation_pinned_free_obj_space (condemned_gen2) = 0;
21652         generation_allocated_in_pinned_free (condemned_gen2) = 0;
21653         generation_allocated_since_last_pin (condemned_gen2) = 0;
21654 #endif //FREE_USAGE_STATS
21655         generation_plan_allocation_start (condemned_gen2) = 0;
21656         generation_allocation_segment (condemned_gen2) =
21657             heap_segment_rw (generation_start_segment (condemned_gen2));
21658
21659         PREFIX_ASSUME(generation_allocation_segment(condemned_gen2) != NULL);
21660
21661         if (generation_start_segment (condemned_gen2) != ephemeral_heap_segment)
21662         {
21663             generation_allocation_pointer (condemned_gen2) =
21664                 heap_segment_mem (generation_allocation_segment (condemned_gen2));
21665         }
21666         else
21667         {
21668             generation_allocation_pointer (condemned_gen2) = generation_allocation_start (condemned_gen2);
21669         }
21670
21671         generation_allocation_limit (condemned_gen2) = generation_allocation_pointer (condemned_gen2);
21672         generation_allocation_context_start_region (condemned_gen2) = generation_allocation_pointer (condemned_gen2);
21673
21674         condemned_gn--;
21675     }
21676
21677     BOOL allocate_first_generation_start = FALSE;
21678     
21679     if (allocate_in_condemned)
21680     {
21681         allocate_first_generation_start = TRUE;
21682     }
21683
21684     dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));
21685
21686     demotion_low = MAX_PTR;
21687     demotion_high = heap_segment_allocated (ephemeral_heap_segment);
21688
21689     // If we are doing a gen1 GC only because of cards, it means we should not demote any pinned plugs
21690     // from gen1. They should get promoted to gen2.
21691     demote_gen1_p = !(settings.promotion && 
21692                       (settings.condemned_generation == (max_generation - 1)) && 
21693                       gen_to_condemn_reasons.is_only_condition (gen_low_card_p));
21694
21695     total_ephemeral_size = 0;
21696
21697     print_free_and_plug ("BP");
21698
21699     for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
21700     {
21701         generation* temp_gen = generation_of (gen_idx);
21702
21703         dprintf (2, ("gen%d start %Ix, plan start %Ix",
21704             gen_idx, 
21705             generation_allocation_start (temp_gen),
21706             generation_plan_allocation_start (temp_gen)));
21707     }
21708
21709     BOOL fire_pinned_plug_events_p = ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, PinPlugAtGCTime);
21710     size_t last_plug_len = 0;
21711
21712     while (1)
21713     {
21714         if (x >= end)
21715         {
21716             assert (x == end);
21717             assert (heap_segment_allocated (seg1) == end);
21718             heap_segment_allocated (seg1) = plug_end;
21719
21720             current_brick = update_brick_table (tree, current_brick, x, plug_end);
21721             dprintf (3, ("end of seg: new tree, sequence# 0"));
21722             sequence_number = 0;
21723             tree = 0;
21724
21725             if (heap_segment_next_rw (seg1))
21726             {
21727                 seg1 = heap_segment_next_rw (seg1);
21728                 end = heap_segment_allocated (seg1);
21729                 plug_end = x = heap_segment_mem (seg1);
21730                 current_brick = brick_of (x);
21731                 dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));
21732                 continue;
21733             }
21734             else
21735             {
21736                 break;
21737             }
21738         }
21739
21740         BOOL last_npinned_plug_p = FALSE;
21741         BOOL last_pinned_plug_p = FALSE;
21742
21743         // last_pinned_plug is the beginning of the last pinned plug. If we merge a plug into a pinned
21744         // plug we do not change the value of last_pinned_plug. This happens with artificially pinned plugs -
21745         // it can be merged with a previous pinned plug and a pinned plug after it can be merged with it.
21746         uint8_t* last_pinned_plug = 0;
21747         size_t num_pinned_plugs_in_plug = 0;
21748
21749         uint8_t* last_object_in_plug = 0;
21750
21751         while ((x < end) && marked (x))
21752         {
21753             uint8_t*  plug_start = x;
21754             uint8_t*  saved_plug_end = plug_end;
21755             BOOL   pinned_plug_p = FALSE;
21756             BOOL   npin_before_pin_p = FALSE;
21757             BOOL   saved_last_npinned_plug_p = last_npinned_plug_p;
21758             uint8_t*  saved_last_object_in_plug = last_object_in_plug;
21759             BOOL   merge_with_last_pin_p = FALSE;
21760
21761             size_t added_pinning_size = 0;
21762             size_t artificial_pinned_size = 0;
21763
21764             store_plug_gap_info (plug_start, plug_end, last_npinned_plug_p, last_pinned_plug_p, 
21765                                  last_pinned_plug, pinned_plug_p, last_object_in_plug, 
21766                                  merge_with_last_pin_p, last_plug_len);
21767
21768 #ifdef FEATURE_STRUCTALIGN
21769             int requiredAlignment = ((CObjectHeader*)plug_start)->GetRequiredAlignment();
21770             size_t alignmentOffset = OBJECT_ALIGNMENT_OFFSET;
21771 #endif // FEATURE_STRUCTALIGN
21772
21773             {
21774                 uint8_t* xl = x;
21775                 while ((xl < end) && marked (xl) && (pinned (xl) == pinned_plug_p))
21776                 {
21777                     assert (xl < end);
21778                     if (pinned(xl))
21779                     {
21780                         clear_pinned (xl);
21781                     }
21782 #ifdef FEATURE_STRUCTALIGN
21783                     else
21784                     {
21785                         int obj_requiredAlignment = ((CObjectHeader*)xl)->GetRequiredAlignment();
21786                         if (obj_requiredAlignment > requiredAlignment)
21787                         {
21788                             requiredAlignment = obj_requiredAlignment;
21789                             alignmentOffset = xl - plug_start + OBJECT_ALIGNMENT_OFFSET;
21790                         }
21791                     }
21792 #endif // FEATURE_STRUCTALIGN
21793
21794                     clear_marked (xl);
21795
21796                     dprintf(4, ("+%Ix+", (size_t)xl));
21797                     assert ((size (xl) > 0));
21798                     assert ((size (xl) <= LARGE_OBJECT_SIZE));
21799
21800                     last_object_in_plug = xl;
21801
21802                     xl = xl + Align (size (xl));
21803                     Prefetch (xl);
21804                 }
21805
21806                 BOOL next_object_marked_p = ((xl < end) && marked (xl));
21807
21808                 if (pinned_plug_p)
21809                 {
21810                     // If it is pinned we need to extend to the next marked object, as we can't use part of
21811                     // a pinned object to make the artificial gap (unless the last 3 ptr-sized words are all
21812                     // references, but for now I am just using the next non-pinned object for that).
21813                     if (next_object_marked_p) 
21814                     {
21815                         clear_marked (xl);
21816                         last_object_in_plug = xl;
21817                         size_t extra_size = Align (size (xl));
21818                         xl = xl + extra_size;
21819                         added_pinning_size = extra_size;
21820                     }
21821                 }
21822                 else
21823                 {
21824                     if (next_object_marked_p)
21825                         npin_before_pin_p = TRUE;
21826                 }
21827
21828                 assert (xl <= end);
21829                 x = xl;
21830             }
21831             dprintf (3, ( "%Ix[", (size_t)x));
21832             plug_end = x;
21833             size_t ps = plug_end - plug_start;
21834             last_plug_len = ps;
21835             dprintf (3, ( "%Ix[(%Ix)", (size_t)x, ps));
21836             uint8_t*  new_address = 0;
21837
21838             if (!pinned_plug_p)
21839             {
21840                 if (allocate_in_condemned &&
21841                     (settings.condemned_generation == max_generation) &&
21842                     (ps > OS_PAGE_SIZE))
21843                 {
21844                     ptrdiff_t reloc = plug_start - generation_allocation_pointer (consing_gen);
21845                     //reloc should be >= 0 except when we relocate
21846                     //across segments and the dest seg is higher than the src
21847
21848                     if ((ps > (8*OS_PAGE_SIZE)) &&
21849                         (reloc > 0) &&
21850                         ((size_t)reloc < (ps/16)))
21851                     {
21852                         dprintf (3, ("Pinning %Ix; reloc would have been: %Ix",
21853                                      (size_t)plug_start, reloc));
21854                         // The last plug couldn't have been an npinned plug or it would have
21855                         // included this plug.
21856                         assert (!saved_last_npinned_plug_p);
21857
21858                         if (last_pinned_plug)
21859                         {
21860                             dprintf (3, ("artificially pinned plug merged with last pinned plug"));
21861                             merge_with_last_pin_p = TRUE;
21862                         }
21863                         else
21864                         {
21865                             enque_pinned_plug (plug_start, FALSE, 0);
21866                             last_pinned_plug = plug_start;
21867                         }
21868
21869                         convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
21870                                                 ps, artificial_pinned_size);
21871                     }
21872                 }
21873             }
21874
21875             if (allocate_first_generation_start)
21876             {
21877                 allocate_first_generation_start = FALSE;
21878                 plan_generation_start (condemned_gen1, consing_gen, plug_start);
21879                 assert (generation_plan_allocation_start (condemned_gen1));
21880             }
21881
21882             if (seg1 == ephemeral_heap_segment)
21883             {
21884                 process_ephemeral_boundaries (plug_start, active_new_gen_number,
21885                                               active_old_gen_number,
21886                                               consing_gen,
21887                                               allocate_in_condemned);
21888             }
21889
21890             dprintf (3, ("adding %Id to gen%d surv", ps, active_old_gen_number));
21891
21892             dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number);
21893             dd_survived_size (dd_active_old) += ps;
21894
21895             BOOL convert_to_pinned_p = FALSE;
21896
21897             if (!pinned_plug_p)
21898             {
21899 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
21900                 dd_num_npinned_plugs (dd_active_old)++;
21901 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
21902
21903                 add_gen_plug (active_old_gen_number, ps);
21904
21905                 if (allocate_in_condemned)
21906                 {
21907                     verify_pins_with_post_plug_info("before aic");
21908
21909                     new_address =
21910                         allocate_in_condemned_generations (consing_gen,
21911                                                            ps,
21912                                                            active_old_gen_number,
21913 #ifdef SHORT_PLUGS
21914                                                            &convert_to_pinned_p,
21915                                                            (npin_before_pin_p ? plug_end : 0),
21916                                                            seg1,
21917 #endif //SHORT_PLUGS
21918                                                            plug_start REQD_ALIGN_AND_OFFSET_ARG);
21919                     verify_pins_with_post_plug_info("after aic");
21920                 }
21921                 else
21922                 {
21923                     new_address = allocate_in_older_generation (older_gen, ps, active_old_gen_number, plug_start REQD_ALIGN_AND_OFFSET_ARG);
21924
21925                     if (new_address != 0)
21926                     {
21927                         if (settings.condemned_generation == (max_generation - 1))
21928                         {
21929                             dprintf (3, (" NA: %Ix-%Ix -> %Ix, %Ix (%Ix)",
21930                                 plug_start, plug_end,
21931                                 (size_t)new_address, (size_t)new_address + (plug_end - plug_start),
21932                                 (size_t)(plug_end - plug_start)));
21933                         }
21934                     }
21935                     else
21936                     {
21937                         allocate_in_condemned = TRUE;
21938
21939                         new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number, 
21940 #ifdef SHORT_PLUGS
21941                                                                          &convert_to_pinned_p,
21942                                                                          (npin_before_pin_p ? plug_end : 0),
21943                                                                          seg1,
21944 #endif //SHORT_PLUGS
21945                                                                          plug_start REQD_ALIGN_AND_OFFSET_ARG);
21946                     }
21947                 }
21948
21949                 if (convert_to_pinned_p)
21950                 {
21951                     assert (last_npinned_plug_p != FALSE);
21952                     assert (last_pinned_plug_p == FALSE);
21953                     convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
21954                                             ps, artificial_pinned_size);
21955                     enque_pinned_plug (plug_start, FALSE, 0);
21956                     last_pinned_plug = plug_start;
21957                 }
21958                 else
21959                 {
21960                     if (!new_address)
21961                     {
21962                         //verify that we are at the end of the ephemeral segment
21963                         assert (generation_allocation_segment (consing_gen) ==
21964                                 ephemeral_heap_segment);
21965                         //verify that we are near the end
21966                         assert ((generation_allocation_pointer (consing_gen) + Align (ps)) <
21967                                 heap_segment_allocated (ephemeral_heap_segment));
21968                         assert ((generation_allocation_pointer (consing_gen) + Align (ps)) >
21969                                 (heap_segment_allocated (ephemeral_heap_segment) + Align (min_obj_size)));
21970                     }
21971                     else
21972                     {
21973 #ifdef SIMPLE_DPRINTF
21974                         dprintf (3, ("(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d)",
21975                             (size_t)(node_gap_size (plug_start)), 
21976                             plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address),
21977                                 (size_t)new_address + ps, ps, 
21978                                 (is_plug_padded (plug_start) ? 1 : 0)));
21979 #endif //SIMPLE_DPRINTF
21980
21981 #ifdef SHORT_PLUGS
21982                         if (is_plug_padded (plug_start))
21983                         {
21984                             dprintf (3, ("%Ix was padded", plug_start));
21985                             dd_padding_size (dd_active_old) += Align (min_obj_size);
21986                         }
21987 #endif //SHORT_PLUGS
21988                     }
21989                 }
21990             }
21991
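            // A pinned plug stays where it is: new_address remains plug_start and
            // its size is either merged into the previous pinned plug or recorded
            // with set_pinned_info, then counted in the pinned survival stats.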
21992             if (pinned_plug_p)
21993             {
21994                 if (fire_pinned_plug_events_p)
21995                     FireEtwPinPlugAtGCTime(plug_start, plug_end, 
21996                                            (merge_with_last_pin_p ? 0 : (uint8_t*)node_gap_size (plug_start)),
21997                                            GetClrInstanceId());
21998
21999                 if (merge_with_last_pin_p)
22000                 {
22001                     merge_with_last_pinned_plug (last_pinned_plug, ps);
22002                 }
22003                 else
22004                 {
22005                     assert (last_pinned_plug == plug_start);
22006                     set_pinned_info (plug_start, ps, consing_gen);
22007                 }
22008
22009                 new_address = plug_start;
22010
22011                 dprintf (3, ( "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)",
22012                             (size_t)(node_gap_size (plug_start)), (size_t)plug_start,
22013                             (size_t)plug_end, ps,
22014                             (merge_with_last_pin_p ? 1 : 0)));
22015
22016                 dprintf (3, ("adding %Id to gen%d pinned surv", plug_end - plug_start, active_old_gen_number));
22017                 dd_pinned_survived_size (dd_active_old) += plug_end - plug_start;
22018                 dd_added_pinned_size (dd_active_old) += added_pinning_size;
22019                 dd_artificial_pinned_survived_size (dd_active_old) += artificial_pinned_size;
22020
22021                 if (!demote_gen1_p && (active_old_gen_number == (max_generation - 1)))
22022                 {
22023                     last_gen1_pin_end = plug_end;
22024                 }
22025             }
22026
22027 #ifdef _DEBUG
22028             // detect forward allocation in the same segment
22029             assert (!((new_address > plug_start) &&
22030                 (new_address < heap_segment_reserved (seg1))));
22031 #endif //_DEBUG
22032
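            // Record the plug in the brick table and in the per-brick relocation
            // tree: note its relocation distance and insert it as a node so the
            // relocate and compact phases can find it later.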
22033             if (!merge_with_last_pin_p)
22034             {
22035                 if (current_brick != brick_of (plug_start))
22036                 {
22037                     current_brick = update_brick_table (tree, current_brick, plug_start, saved_plug_end);
22038                     sequence_number = 0;
22039                     tree = 0;
22040                 }
22041
22042                 set_node_relocation_distance (plug_start, (new_address - plug_start));
22043                 if (last_node && (node_relocation_distance (last_node) ==
22044                                   (node_relocation_distance (plug_start) +
22045                                    node_gap_size (plug_start))))
22046                 {
22047                     //dprintf(3,( " Lb"));
22048                     dprintf (3, ("%Ix Lb", plug_start));
22049                     set_node_left (plug_start);
22050                 }
22051                 if (0 == sequence_number)
22052                 {
22053                     dprintf (2, ("sn: 0, tree is set to %Ix", plug_start));
22054                     tree = plug_start;
22055                 }
22056
22057                 verify_pins_with_post_plug_info("before insert node");
22058
22059                 tree = insert_node (plug_start, ++sequence_number, tree, last_node);
22060                 dprintf (3, ("tree is %Ix (b: %Ix) after insert_node", tree, brick_of (tree)));
22061                 last_node = plug_start;
22062
22063 #ifdef _DEBUG
22064                 // If we detect that the plug right before us is a pinned plug, we should save this gap info
22065                 if (!pinned_plug_p)
22066                 {
22067                     if (mark_stack_tos > 0)
22068                     {
22069                         mark& m = mark_stack_array[mark_stack_tos - 1];
22070                         if (m.has_post_plug_info())
22071                         {
22072                             uint8_t* post_plug_info_start = m.saved_post_plug_info_start;
22073                             size_t* current_plug_gap_start = (size_t*)(plug_start - sizeof (plug_and_gap));
22074                             if ((uint8_t*)current_plug_gap_start == post_plug_info_start)
22075                             {
22076                                 dprintf (3, ("Ginfo: %Ix, %Ix, %Ix",
22077                                     *current_plug_gap_start, *(current_plug_gap_start + 1),
22078                                     *(current_plug_gap_start + 2)));
22079                                 memcpy (&(m.saved_post_plug_debug), current_plug_gap_start, sizeof (gap_reloc_pair));
22080                             }
22081                         }
22082                     }
22083                 }
22084 #endif //_DEBUG
22085
22086                 verify_pins_with_post_plug_info("after insert node");
22087             }
22088         }
22089         
22090         if (num_pinned_plugs_in_plug > 1)
22091         {
22092             dprintf (3, ("more than %Id pinned plugs in this plug", num_pinned_plugs_in_plug));
22093         }
22094
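        // Advance x to the next marked object: either take it from the sorted
        // mark list or scan object by object until a marked one (or the end of
        // the segment) is reached.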
22095         {
22096 #ifdef MARK_LIST
22097             if (use_mark_list)
22098             {
22099                while ((mark_list_next < mark_list_index) &&
22100                       (*mark_list_next <= x))
22101                {
22102                    mark_list_next++;
22103                }
22104                if ((mark_list_next < mark_list_index)
22105 #ifdef MULTIPLE_HEAPS
22106                    && (*mark_list_next < end) //for multiple segments
22107 #endif //MULTIPLE_HEAPS
22108                    )
22109                    x = *mark_list_next;
22110                else
22111                    x = end;
22112             }
22113             else
22114 #endif //MARK_LIST
22115             {
22116                 uint8_t* xl = x;
22117 #ifdef BACKGROUND_GC
22118                 if (current_c_gc_state == c_gc_state_marking)
22119                 {
22120                     assert (recursive_gc_sync::background_running_p());
22121                     while ((xl < end) && !marked (xl))
22122                     {
22123                         dprintf (4, ("-%Ix-", (size_t)xl));
22124                         assert ((size (xl) > 0));
22125                         background_object_marked (xl, TRUE);
22126                         xl = xl + Align (size (xl));
22127                         Prefetch (xl);
22128                     }
22129                 }
22130                 else
22131 #endif //BACKGROUND_GC
22132                 {
22133                     while ((xl < end) && !marked (xl))
22134                     {
22135                         dprintf (4, ("-%Ix-", (size_t)xl));
22136                         assert ((size (xl) > 0));
22137                         xl = xl + Align (size (xl));
22138                         Prefetch (xl);
22139                     }
22140                 }
22141                 assert (xl <= end);
22142                 x = xl;
22143             }
22144         }
22145     }
22146
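    // Drain whatever is left in the pinned plug queue: once a pin in the
    // ephemeral segment is reached, the remaining generation starts are planned;
    // each dequeued pin then advances the consing generation's allocation
    // pointer past the plug, since pinned plugs are never relocated.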
22147     while (!pinned_plug_que_empty_p())
22148     {
22149         if (settings.promotion)
22150         {
22151             uint8_t* pplug = pinned_plug (oldest_pin());
22152             if (in_range_for_segment (pplug, ephemeral_heap_segment))
22153             {
22154                 consing_gen = ensure_ephemeral_heap_segment (consing_gen);
22155                 //allocate all of the generation gaps
22156                 while (active_new_gen_number > 0)
22157                 {
22158                     active_new_gen_number--;
22159
22160                     if (active_new_gen_number == (max_generation - 1))
22161                     {
22162                         maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
22163                         if (!demote_gen1_p)
22164                             advance_pins_for_demotion (consing_gen);
22165                     }
22166
22167                     generation* gen = generation_of (active_new_gen_number);
22168                     plan_generation_start (gen, consing_gen, 0);
22169
22170                     if (demotion_low == MAX_PTR)
22171                     {
22172                         demotion_low = pplug;
22173                         dprintf (3, ("end plan: dlow->%Ix", demotion_low));
22174                     }
22175
22176                     dprintf (2, ("(%d)gen%d plan start: %Ix", 
22177                                   heap_number, active_new_gen_number, (size_t)generation_plan_allocation_start (gen)));
22178                     assert (generation_plan_allocation_start (gen));
22179                 }
22180             }
22181         }
22182
22183         if (pinned_plug_que_empty_p())
22184             break;
22185
22186         size_t  entry = deque_pinned_plug();
22187         mark*  m = pinned_plug_of (entry);
22188         uint8_t*  plug = pinned_plug (m);
22189         size_t  len = pinned_len (m);
22190
22191         // detect a pinned block in a different (later) segment than
22192         // the allocation segment
22193         heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen));
22194
22195         while ((plug < generation_allocation_pointer (consing_gen)) ||
22196                (plug >= heap_segment_allocated (nseg)))
22197         {
22198             assert ((plug < heap_segment_mem (nseg)) ||
22199                     (plug > heap_segment_reserved (nseg)));
22200             //adjust the end of the segment to be the end of the plug
22201             assert (generation_allocation_pointer (consing_gen)>=
22202                     heap_segment_mem (nseg));
22203             assert (generation_allocation_pointer (consing_gen)<=
22204                     heap_segment_committed (nseg));
22205
22206             heap_segment_plan_allocated (nseg) =
22207                 generation_allocation_pointer (consing_gen);
22208             //switch allocation segment
22209             nseg = heap_segment_next_rw (nseg);
22210             generation_allocation_segment (consing_gen) = nseg;
22211             //reset the allocation pointer and limits
22212             generation_allocation_pointer (consing_gen) =
22213                 heap_segment_mem (nseg);
22214         }
22215
22216         set_new_pin_info (m, generation_allocation_pointer (consing_gen));
22217         dprintf (2, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug),
22218             (size_t)(brick_table[brick_of (plug)])));
22219
22220         generation_allocation_pointer (consing_gen) = plug + len;
22221         generation_allocation_limit (consing_gen) =
22222             generation_allocation_pointer (consing_gen);
22223         //Add the size of the pinned plug to the right generation's pinned allocations;
22224         //find out which gen this pinned plug came from 
22225         int frgn = object_gennum (plug);
22226         if ((frgn != (int)max_generation) && settings.promotion)
22227         {
22228             generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
22229         }
22230
22231     }
22232
22233     plan_generation_starts (consing_gen);
22234     print_free_and_plug ("AP");
22235
22236     {
22237 #ifdef SIMPLE_DPRINTF
22238         for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
22239         {
22240             generation* temp_gen = generation_of (gen_idx);
22241             dynamic_data* temp_dd = dynamic_data_of (gen_idx);
22242
22243             int added_pinning_ratio = 0;
22244             int artificial_pinned_ratio = 0;
22245
22246             if (dd_pinned_survived_size (temp_dd) != 0)
22247             {
22248                 added_pinning_ratio = (int)((float)dd_added_pinned_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
22249                 artificial_pinned_ratio = (int)((float)dd_artificial_pinned_survived_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
22250             }
22251
22252             size_t padding_size = 
22253 #ifdef SHORT_PLUGS
22254                 dd_padding_size (temp_dd);
22255 #else
22256                 0;
22257 #endif //SHORT_PLUGS
22258             dprintf (1, ("gen%d: %Ix, %Ix(%Id), NON PIN alloc: %Id, pin com: %Id, sweep: %Id, surv: %Id, pinsurv: %Id(%d%% added, %d%% art), np surv: %Id, pad: %Id",
22259                 gen_idx, 
22260                 generation_allocation_start (temp_gen),
22261                 generation_plan_allocation_start (temp_gen),
22262                 (size_t)(generation_plan_allocation_start (temp_gen) - generation_allocation_start (temp_gen)),
22263                 generation_allocation_size (temp_gen),
22264                 generation_pinned_allocation_compact_size (temp_gen),
22265                 generation_pinned_allocation_sweep_size (temp_gen),
22266                 dd_survived_size (temp_dd),
22267                 dd_pinned_survived_size (temp_dd),
22268                 added_pinning_ratio,
22269                 artificial_pinned_ratio,
22270                 (dd_survived_size (temp_dd) - dd_pinned_survived_size (temp_dd)),
22271                 padding_size));
22272         }
22273 #endif //SIMPLE_DPRINTF
22274     }
22275
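    // For a gen1 GC, record how gen2 is planned to change: free list allocations
    // (and rejected free space), end-of-segment allocations and condemned
    // allocations in the older generation, which all feed maxgen_size_info.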
22276     if (settings.condemned_generation == (max_generation - 1 ))
22277     {
22278         size_t plan_gen2_size = generation_plan_size (max_generation);
22279         size_t growth = plan_gen2_size - old_gen2_size;
22280
22281         if (growth > 0)
22282         {
22283             dprintf (1, ("gen2 grew %Id (end seg alloc: %Id, gen1 c alloc: %Id", 
22284                 growth, generation_end_seg_allocated (generation_of (max_generation)), 
22285                 generation_condemned_allocated (generation_of (max_generation - 1))));
22286         }
22287         else
22288         {
22289             dprintf (1, ("gen2 shrank %Id (end seg alloc: %Id, gen1 c alloc: %Id", 
22290                 (old_gen2_size - plan_gen2_size), generation_end_seg_allocated (generation_of (max_generation)), 
22291                 generation_condemned_allocated (generation_of (max_generation - 1))));
22292         }
22293
22294         generation* older_gen = generation_of (settings.condemned_generation + 1);
22295         size_t rejected_free_space = generation_free_obj_space (older_gen) - r_free_obj_space;
22296         size_t free_list_allocated = generation_free_list_allocated (older_gen) - r_older_gen_free_list_allocated;
22297         size_t end_seg_allocated = generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated;
22298         size_t condemned_allocated = generation_condemned_allocated (older_gen) - r_older_gen_condemned_allocated;
22299
22300         dprintf (1, ("older gen's free alloc: %Id->%Id, seg alloc: %Id->%Id, condemned alloc: %Id->%Id",
22301                     r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen),
22302                     r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen), 
22303                     r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen)));
22304
22305         dprintf (1, ("this GC did %Id free list alloc(%Id bytes free space rejected), %Id seg alloc and %Id condemned alloc, gen1 condemned alloc is %Id", 
22306             free_list_allocated, rejected_free_space, end_seg_allocated,
22307             condemned_allocated, generation_condemned_allocated (generation_of (settings.condemned_generation))));
22308
22309         maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info);
22310         maxgen_size_info->free_list_allocated = free_list_allocated;
22311         maxgen_size_info->free_list_rejected = rejected_free_space;
22312         maxgen_size_info->end_seg_allocated = end_seg_allocated;
22313         maxgen_size_info->condemned_allocated = condemned_allocated;
22314         maxgen_size_info->pinned_allocated = maxgen_pinned_compact_before_advance;
22315         maxgen_size_info->pinned_allocated_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)) - maxgen_pinned_compact_before_advance;
22316
22317 #ifdef FREE_USAGE_STATS
22318         int free_list_efficiency = 0;
22319         if ((free_list_allocated + rejected_free_space) != 0)
22320             free_list_efficiency = (int)(((float) (free_list_allocated) / (float)(free_list_allocated + rejected_free_space)) * (float)100);
22321
22322         int running_free_list_efficiency = (int)(generation_allocator_efficiency(older_gen)*100);
22323
22324         dprintf (1, ("gen%d free list alloc effi: %d%%, current effi: %d%%",
22325                     older_gen->gen_num,
22326                     free_list_efficiency, running_free_list_efficiency));
22327
22328         dprintf (1, ("gen2 free list change"));
22329         for (int j = 0; j < NUM_GEN_POWER2; j++)
22330         {
22331             dprintf (1, ("[h%d][#%Id]: 2^%d: F: %Id->%Id(%Id), P: %Id", 
22332                 heap_number, 
22333                 settings.gc_index,
22334                 (j + 10), r_older_gen_free_space[j], older_gen->gen_free_spaces[j], 
22335                 (ptrdiff_t)(r_older_gen_free_space[j] - older_gen->gen_free_spaces[j]),
22336                 (generation_of(max_generation - 1))->gen_plugs[j]));
22337         }
22338 #endif //FREE_USAGE_STATS
22339     }
22340
22341     size_t fragmentation =
22342         generation_fragmentation (generation_of (condemned_gen_number),
22343                                   consing_gen,
22344                                   heap_segment_allocated (ephemeral_heap_segment));
22345
22346     dprintf (2,("Fragmentation: %Id", fragmentation));
22347     dprintf (2,("---- End of Plan phase ----"));
22348
22349 #ifdef TIME_GC
22350     finish = GetCycleCount32();
22351     plan_time = finish - start;
22352 #endif //TIME_GC
22353
22354     // We may update write barrier code.  We assume here EE has been suspended if we are on a GC thread.
22355     assert(IsGCInProgress());
22356
22357     BOOL should_expand = FALSE;
22358     BOOL should_compact= FALSE;
22359     ephemeral_promotion = FALSE;
22360
22361 #ifdef BIT64
22362     if ((!settings.concurrent) &&
22363         ((condemned_gen_number < max_generation) && 
22364          ((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95))))
22365     {
22366         dprintf (2, ("gen0 reduction count is %d, condemning %d, mem load %d",
22367                      settings.gen0_reduction_count,
22368                      condemned_gen_number,
22369                      settings.entry_memory_load));
22370         should_compact = TRUE;
22371
22372         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, 
22373             ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load));
22374
22375         if ((condemned_gen_number >= (max_generation - 1)) && 
22376             dt_low_ephemeral_space_p (tuning_deciding_expansion))
22377         {
22378             dprintf (2, ("Not enough space for all ephemeral generations with compaction"));
22379             should_expand = TRUE;
22380         }
22381     }
22382     else
22383     {
22384 #endif // BIT64
22385         should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand);
22386 #ifdef BIT64
22387     }
22388 #endif // BIT64
22389
22390 #ifdef FEATURE_LOH_COMPACTION
22391     loh_compacted_p = FALSE;
22392 #endif //FEATURE_LOH_COMPACTION
22393
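    // A full GC also decides what to do with the large object heap: either plan
    // LOH compaction (which forces a compacting GC) or sweep the large objects;
    // heap 0 lets an unused LOH pin queue decay away.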
22394     if (condemned_gen_number == max_generation)
22395     {
22396 #ifdef FEATURE_LOH_COMPACTION
22397         if (settings.loh_compaction)
22398         {
22399             if (plan_loh())
22400             {
22401                 should_compact = TRUE;
22402                 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
22403                 loh_compacted_p = TRUE;
22404             }
22405         }
22406         else
22407         {
22408             if ((heap_number == 0) && (loh_pinned_queue))
22409             {
22410                 loh_pinned_queue_decay--;
22411
22412                 if (!loh_pinned_queue_decay)
22413                 {
22414                     delete loh_pinned_queue;
22415                     loh_pinned_queue = 0;
22416                 }
22417             }
22418         }
22419
22420         if (!loh_compacted_p)
22421 #endif //FEATURE_LOH_COMPACTION
22422         {
22423             GCToEEInterface::DiagWalkLOHSurvivors(__this);
22424             sweep_large_objects();
22425         }
22426     }
22427     else
22428     {
22429         settings.loh_compaction = FALSE;
22430     }
22431
22432 #ifdef MULTIPLE_HEAPS
22433
22434     new_heap_segment = NULL;
22435
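    // Server GC: fold this heap's compact/expand decision into gc_policy, then
    // join with the other heaps so the final policy is decided together: any
    // heap that wants to compact raises the policy for all heaps, and heaps
    // chosen to expand acquire their new segment while the threads are serialized.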
22436     if (should_compact && should_expand)
22437         gc_policy = policy_expand;
22438     else if (should_compact)
22439         gc_policy = policy_compact;
22440     else
22441         gc_policy = policy_sweep;
22442
22443     //vote for result of should_compact
22444     dprintf (3, ("Joining for compaction decision"));
22445     gc_t_join.join(this, gc_join_decide_on_compaction);
22446     if (gc_t_join.joined())
22447     {
22448         //safe place to delete large heap segments
22449         if (condemned_gen_number == max_generation)
22450         {
22451             for (int i = 0; i < n_heaps; i++)
22452             {
22453                 g_heaps [i]->rearrange_large_heap_segments ();
22454             }
22455         }
22456
22457         settings.demotion = FALSE;
22458         int pol_max = policy_sweep;
22459 #ifdef GC_CONFIG_DRIVEN
22460         BOOL is_compaction_mandatory = FALSE;
22461 #endif //GC_CONFIG_DRIVEN
22462
22463         int i;
22464         for (i = 0; i < n_heaps; i++)
22465         {
22466             if (pol_max < g_heaps[i]->gc_policy)
22467                 pol_max = policy_compact;
22468             // set the demotion flag if any of the heaps has demotion
22469             if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low)
22470             {
22471                 (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit);
22472                 settings.demotion = TRUE;
22473             }
22474
22475 #ifdef GC_CONFIG_DRIVEN
22476             if (!is_compaction_mandatory)
22477             {
22478                 int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact);
22479                 if (compact_reason >= 0)
22480                 {
22481                     if (gc_heap_compact_reason_mandatory_p[compact_reason])
22482                         is_compaction_mandatory = TRUE;
22483                 }
22484             }
22485 #endif //GC_CONFIG_DRIVEN
22486         }
22487
22488 #ifdef GC_CONFIG_DRIVEN
22489         if (!is_compaction_mandatory)
22490         {
22491             // If compaction is not mandatory we can feel free to change it to a sweeping GC.
22492             // Note that we may want to change this to only checking every so often instead of every single GC.
22493             if (should_do_sweeping_gc (pol_max >= policy_compact))
22494             {
22495                 pol_max = policy_sweep;
22496             }
22497             else
22498             {
22499                 if (pol_max == policy_sweep)
22500                     pol_max = policy_compact;
22501             }
22502         }
22503 #endif //GC_CONFIG_DRIVEN
22504
22505         for (i = 0; i < n_heaps; i++)
22506         {
22507             if (pol_max > g_heaps[i]->gc_policy)
22508                 g_heaps[i]->gc_policy = pol_max;
22509             //get the segment while we are serialized
22510             if (g_heaps[i]->gc_policy == policy_expand)
22511             {
22512                 g_heaps[i]->new_heap_segment =
22513                      g_heaps[i]->soh_get_segment_to_expand();
22514                 if (!g_heaps[i]->new_heap_segment)
22515                 {
22516                     set_expand_in_full_gc (condemned_gen_number);
22517                     //we are out of memory, cancel the expansion
22518                     g_heaps[i]->gc_policy = policy_compact;
22519                 }
22520             }
22521         }
22522
22523         BOOL is_full_compacting_gc = FALSE;
22524
22525         if ((gc_policy >= policy_compact) && (condemned_gen_number == max_generation))
22526         {
22527             full_gc_counts[gc_type_compacting]++;
22528             is_full_compacting_gc = TRUE;
22529         }
22530
22531         for (i = 0; i < n_heaps; i++)
22532         {
22533             //copy the card and brick tables
22534             if (g_gc_card_table!= g_heaps[i]->card_table)
22535             {
22536                 g_heaps[i]->copy_brick_card_table();
22537             }
22538
22539             if (is_full_compacting_gc)
22540             {
22541                 g_heaps[i]->loh_alloc_since_cg = 0;
22542             }
22543         }
22544
22545         //start all threads on the roots.
22546         dprintf(3, ("Starting all gc threads after compaction decision"));
22547         gc_t_join.restart();
22548     }
22549
22550     //reset the local variables accordingly
22551     should_compact = (gc_policy >= policy_compact);
22552     should_expand  = (gc_policy >= policy_expand);
22553
22554 #else //MULTIPLE_HEAPS
22555
22556     //safe place to delete large heap segments
22557     if (condemned_gen_number == max_generation)
22558     {
22559         rearrange_large_heap_segments ();
22560     }
22561
22562     settings.demotion = ((demotion_high >= demotion_low) ? TRUE : FALSE);
22563     if (settings.demotion)
22564         get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
22565
22566 #ifdef GC_CONFIG_DRIVEN
22567     BOOL is_compaction_mandatory = FALSE;
22568     int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
22569     if (compact_reason >= 0)
22570         is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason];
22571
22572     if (!is_compaction_mandatory)
22573     {
22574         if (should_do_sweeping_gc (should_compact))
22575             should_compact = FALSE;
22576         else
22577             should_compact = TRUE;
22578     }
22579 #endif //GC_CONFIG_DRIVEN
22580
22581     if (should_compact && (condemned_gen_number == max_generation))
22582     {
22583         full_gc_counts[gc_type_compacting]++;
22584         loh_alloc_since_cg = 0;
22585     }
22586 #endif //MULTIPLE_HEAPS
22587
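    // Compacting path: optionally expand onto a new ephemeral segment, run the
    // relocate and compact phases, fix the generation bounds, and then rebuild
    // free space from the gaps saved in front of pinned plugs.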
22588     if (should_compact)
22589     {
22590         dprintf (2,( "**** Doing Compacting GC ****"));
22591
22592         if (should_expand)
22593         {
22594 #ifndef MULTIPLE_HEAPS
22595             heap_segment* new_heap_segment = soh_get_segment_to_expand();
22596 #endif //!MULTIPLE_HEAPS
22597             if (new_heap_segment)
22598             {
22599                 consing_gen = expand_heap(condemned_gen_number,
22600                                           consing_gen,
22601                                           new_heap_segment);
22602             }
22603
22604             // If we couldn't get a new segment, or we were able to
22605             // reserve one but had no space to commit, we couldn't
22606             // expand the heap.
22607             if (ephemeral_heap_segment != new_heap_segment)
22608             {
22609                 set_expand_in_full_gc (condemned_gen_number);
22610                 should_expand = FALSE;
22611             }
22612         }
22613         generation_allocation_limit (condemned_gen1) =
22614             generation_allocation_pointer (condemned_gen1);
22615         if ((condemned_gen_number < max_generation))
22616         {
22617             generation_allocator (older_gen)->commit_alloc_list_changes();
22618
22619             // Fix the allocation area of the older generation
22620             fix_older_allocation_area (older_gen);
22621         }
22622         assert (generation_allocation_segment (consing_gen) ==
22623                 ephemeral_heap_segment);
22624
22625         GCToEEInterface::DiagWalkSurvivors(__this);
22626
22627         relocate_phase (condemned_gen_number, first_condemned_address);
22628         compact_phase (condemned_gen_number, first_condemned_address,
22629                        (!settings.demotion && settings.promotion));
22630         fix_generation_bounds (condemned_gen_number, consing_gen);
22631         assert (generation_allocation_limit (youngest_generation) ==
22632                 generation_allocation_pointer (youngest_generation));
22633         if (condemned_gen_number >= (max_generation -1))
22634         {
22635 #ifdef MULTIPLE_HEAPS
22636             // this needs to be serialized just because we have one
22637             // segment_standby_list/seg_table for all heaps. We should at least make it
22638             // so that when hoarding is not on we don't need this join, because
22639             // decommitting memory can take a long time.
22640             //must serialize on deleting segments
22641             gc_t_join.join(this, gc_join_rearrange_segs_compaction);
22642             if (gc_t_join.joined())
22643             {
22644                 for (int i = 0; i < n_heaps; i++)
22645                 {
22646                     g_heaps[i]->rearrange_heap_segments(TRUE);
22647                 }
22648                 gc_t_join.restart();
22649             }
22650 #else
22651             rearrange_heap_segments(TRUE);
22652 #endif //MULTIPLE_HEAPS
22653
22654             if (should_expand)
22655             {
22656                 //fix the start_segment for the ephemeral generations
22657                 for (int i = 0; i < max_generation; i++)
22658                 {
22659                     generation* gen = generation_of (i);
22660                     generation_start_segment (gen) = ephemeral_heap_segment;
22661                     generation_allocation_segment (gen) = ephemeral_heap_segment;
22662                 }
22663             }
22664         }
22665
22666         {
22667 #ifdef FEATURE_PREMORTEM_FINALIZATION
22668             finalize_queue->UpdatePromotedGenerations (condemned_gen_number,
22669                                                        (!settings.demotion && settings.promotion));
22670 #endif // FEATURE_PREMORTEM_FINALIZATION
22671
22672 #ifdef MULTIPLE_HEAPS
22673             dprintf(3, ("Joining after end of compaction"));
22674             gc_t_join.join(this, gc_join_adjust_handle_age_compact);
22675             if (gc_t_join.joined())
22676 #endif //MULTIPLE_HEAPS
22677             {
22678 #ifdef MULTIPLE_HEAPS
22679                 //join all threads to make sure they are synchronized
22680                 dprintf(3, ("Restarting after Promotion granted"));
22681                 gc_t_join.restart();
22682 #endif //MULTIPLE_HEAPS
22683             }
22684
22685             ScanContext sc;
22686             sc.thread_number = heap_number;
22687             sc.promotion = FALSE;
22688             sc.concurrent = FALSE;
22689             // new generation bounds are set, so we can call this now
22690             if (settings.promotion && !settings.demotion)
22691             {
22692                 dprintf (2, ("Promoting EE roots for gen %d",
22693                              condemned_gen_number));
22694                 GCScan::GcPromotionsGranted(condemned_gen_number,
22695                                                 max_generation, &sc);
22696             }
22697             else if (settings.demotion)
22698             {
22699                 dprintf (2, ("Demoting EE roots for gen %d",
22700                              condemned_gen_number));
22701                 GCScan::GcDemote (condemned_gen_number, max_generation, &sc);
22702             }
22703         }
22704
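        // After compaction, the gaps saved in front of pinned plugs become free
        // space: each gap is turned into a free object, its bricks are fixed up,
        // and it is threaded onto the free list of the generation it now falls in.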
22705         {
22706             gen0_big_free_spaces = 0;
22707
22708             reset_pinned_queue_bos();
22709             unsigned int  gen_number = min (max_generation, 1 + condemned_gen_number);
22710             generation*  gen = generation_of (gen_number);
22711             uint8_t*  low = generation_allocation_start (generation_of (gen_number-1));
22712             uint8_t*  high =  heap_segment_allocated (ephemeral_heap_segment);
22713             
22714             while (!pinned_plug_que_empty_p())
22715             {
22716                 mark*  m = pinned_plug_of (deque_pinned_plug());
22717                 size_t len = pinned_len (m);
22718                 uint8_t*  arr = (pinned_plug (m) - len);
22719                 dprintf(3,("free [%Ix %Ix[ pin",
22720                             (size_t)arr, (size_t)arr + len));
22721                 if (len != 0)
22722                 {
22723                     assert (len >= Align (min_obj_size));
22724                     make_unused_array (arr, len);
22725                     // fix fully contained bricks + first one
22726                     // if the array goes beyond the first brick
22727                     size_t start_brick = brick_of (arr);
22728                     size_t end_brick = brick_of (arr + len);
22729                     if (end_brick != start_brick)
22730                     {
22731                         dprintf (3,
22732                                     ("Fixing bricks [%Ix, %Ix[ to point to unused array %Ix",
22733                                     start_brick, end_brick, (size_t)arr));
22734                         set_brick (start_brick,
22735                                     arr - brick_address (start_brick));
22736                         size_t brick = start_brick+1;
22737                         while (brick < end_brick)
22738                         {
22739                             set_brick (brick, start_brick - brick);
22740                             brick++;
22741                         }
22742                     }
22743
22744                     //when we take an old segment to make the new
22745                     //ephemeral segment, we can have a bunch of
22746                     //pinned plugs out of order going to the new ephemeral seg
22747                     //and then the next plugs go back to max_generation
22748                     if ((heap_segment_mem (ephemeral_heap_segment) <= arr) &&
22749                         (heap_segment_reserved (ephemeral_heap_segment) > arr))
22750                     {
22751
22752                         while ((low <= arr) && (high > arr))
22753                         {
22754                             gen_number--;
22755                             assert ((gen_number >= 1) || (demotion_low != MAX_PTR) ||
22756                                     settings.demotion || !settings.promotion);
22757                             dprintf (3, ("new free list generation %d", gen_number));
22758
22759                             gen = generation_of (gen_number);
22760                             if (gen_number >= 1)
22761                                 low = generation_allocation_start (generation_of (gen_number-1));
22762                             else
22763                                 low = high;
22764                         }
22765                     }
22766                     else
22767                     {
22768                         dprintf (3, ("new free list generation %d", max_generation));
22769                         gen_number = max_generation;
22770                         gen = generation_of (gen_number);
22771                     }
22772
22773                     dprintf(3,("threading it into generation %d", gen_number));
22774                     thread_gap (arr, len, gen);
22775                     add_gen_free (gen_number, len);
22776                 }
22777             }
22778         }
22779
22780 #ifdef _DEBUG
22781         for (int x = 0; x <= max_generation; x++)
22782         {
22783             assert (generation_allocation_start (generation_of (x)));
22784         }
22785 #endif //_DEBUG
22786
22787         if (!settings.demotion && settings.promotion)
22788         {
22789             //clear card for generation 1. generation 0 is empty
22790             clear_card_for_addresses (
22791                 generation_allocation_start (generation_of (1)),
22792                 generation_allocation_start (generation_of (0)));
22793         }
22794         if (settings.promotion && !settings.demotion)
22795         {
22796             uint8_t* start = generation_allocation_start (youngest_generation);
22797             MAYBE_UNUSED_VAR(start);
22798             assert (heap_segment_allocated (ephemeral_heap_segment) ==
22799                     (start + Align (size (start))));
22800         }
22801     }
22802     else
22803     {
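        // Sweeping path: restore the older generation's saved allocation state,
        // rebuild the free lists in place with make_free_lists, and recover the
        // saved pinned plug info; nothing is relocated.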
22804         //force promotion for sweep
22805         settings.promotion = TRUE;
22806         settings.compaction = FALSE;
22807
22808         ScanContext sc;
22809         sc.thread_number = heap_number;
22810         sc.promotion = FALSE;
22811         sc.concurrent = FALSE;
22812
22813         dprintf (2, ("**** Doing Mark and Sweep GC****"));
22814
22815         if ((condemned_gen_number < max_generation))
22816         {
22817             generation_allocator (older_gen)->copy_from_alloc_list (r_free_list);
22818             generation_free_list_space (older_gen) = r_free_list_space;
22819             generation_free_obj_space (older_gen) = r_free_obj_space;
22820             generation_free_list_allocated (older_gen) = r_older_gen_free_list_allocated;
22821             generation_end_seg_allocated (older_gen) = r_older_gen_end_seg_allocated;
22822             generation_condemned_allocated (older_gen) = r_older_gen_condemned_allocated;
22823             generation_allocation_limit (older_gen) = r_allocation_limit;
22824             generation_allocation_pointer (older_gen) = r_allocation_pointer;
22825             generation_allocation_context_start_region (older_gen) = r_allocation_start_region;
22826             generation_allocation_segment (older_gen) = r_allocation_segment;
22827         }
22828
22829         if ((condemned_gen_number < max_generation))
22830         {
22831             // Fix the allocation area of the older generation
22832             fix_older_allocation_area (older_gen);
22833         }
22834
22835         GCToEEInterface::DiagWalkSurvivors(__this);
22836
22837         gen0_big_free_spaces = 0;
22838         make_free_lists (condemned_gen_number);
22839         recover_saved_pinned_info();
22840
22841 #ifdef FEATURE_PREMORTEM_FINALIZATION
22842         finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
22843 #endif // FEATURE_PREMORTEM_FINALIZATION
22844 // MTHTS: leave single thread for HT processing on plan_phase
22845 #ifdef MULTIPLE_HEAPS
22846         dprintf(3, ("Joining after end of sweep"));
22847         gc_t_join.join(this, gc_join_adjust_handle_age_sweep);
22848         if (gc_t_join.joined())
22849 #endif //MULTIPLE_HEAPS
22850         {
22851             GCScan::GcPromotionsGranted(condemned_gen_number,
22852                                             max_generation, &sc);
22853             if (condemned_gen_number >= (max_generation -1))
22854             {
22855 #ifdef MULTIPLE_HEAPS
22856                 for (int i = 0; i < n_heaps; i++)
22857                 {
22858                     g_heaps[i]->rearrange_heap_segments(FALSE);
22859                 }
22860 #else
22861                 rearrange_heap_segments(FALSE);
22862 #endif //MULTIPLE_HEAPS
22863             }
22864
22865 #ifdef MULTIPLE_HEAPS
22866             //join all threads to make sure they are synchronized
22867             dprintf(3, ("Restarting after Promotion granted"));
22868             gc_t_join.restart();
22869 #endif //MULTIPLE_HEAPS
22870         }
22871
22872 #ifdef _DEBUG
22873         for (int x = 0; x <= max_generation; x++)
22874         {
22875             assert (generation_allocation_start (generation_of (x)));
22876         }
22877 #endif //_DEBUG
22878
22879         //clear card for generation 1. generation 0 is empty
22880         clear_card_for_addresses (
22881             generation_allocation_start (generation_of (1)),
22882             generation_allocation_start (generation_of (0)));
22883         assert ((heap_segment_allocated (ephemeral_heap_segment) ==
22884                  (generation_allocation_start (youngest_generation) +
22885                   Align (min_obj_size))));
22886     }
22887
22888     //verify_partial();
22889 }
22890 #ifdef _PREFAST_
22891 #pragma warning(pop)
22892 #endif //_PREFAST_
22893
22894
22895 /*****************************
22896 Called after compact phase to fix all generation gaps
22897 ********************************/
22898 void gc_heap::fix_generation_bounds (int condemned_gen_number,
22899                                      generation* consing_gen)
22900 {
22901     UNREFERENCED_PARAMETER(consing_gen);
22902
22903     assert (generation_allocation_segment (consing_gen) ==
22904             ephemeral_heap_segment);
22905
22906     //assign the planned allocation start to the generation
22907     int gen_number = condemned_gen_number;
22908     int bottom_gen = 0;
22909
22910     while (gen_number >= bottom_gen)
22911     {
22912         generation*  gen = generation_of (gen_number);
22913         dprintf(3,("Fixing generation pointers for %d", gen_number));
22914         if ((gen_number < max_generation) && ephemeral_promotion)
22915         {
22916             make_unused_array (saved_ephemeral_plan_start[gen_number], 
22917                                saved_ephemeral_plan_start_size[gen_number]);
22918         }
22919         reset_allocation_pointers (gen, generation_plan_allocation_start (gen));
22920         make_unused_array (generation_allocation_start (gen), generation_plan_allocation_start_size (gen));
22921         dprintf(3,(" start %Ix", (size_t)generation_allocation_start (gen)));
22922         gen_number--;
22923     }
22924 #ifdef MULTIPLE_HEAPS
22925     if (ephemeral_promotion)
22926     {
22927         //we are creating a generation fault. set the cards.
22928         // and we are only doing this for multiple heaps because in the single heap scenario the 
22929         // new ephemeral generations will be empty and there'll be no need to set cards for the
22930         // old ephemeral generations that got promoted into max_generation.
22931         ptrdiff_t delta = 0;
22932 #ifdef SEG_MAPPING_TABLE
22933         heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]);
22934 #else //SEG_MAPPING_TABLE
22935         heap_segment* old_ephemeral_seg = segment_of (saved_ephemeral_plan_start[max_generation-1], delta);
22936 #endif //SEG_MAPPING_TABLE
22937
22938         assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg));
22939         size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg)));
22940         size_t card = card_of (saved_ephemeral_plan_start[max_generation-1]);
22941         while (card != end_card)
22942         {
22943             set_card (card);
22944             card++;
22945         }
22946     }
22947 #endif //MULTIPLE_HEAPS
22948     {
22949         alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment);
22950         //reset the allocated size
22951         uint8_t* start = generation_allocation_start (youngest_generation);
22952         MAYBE_UNUSED_VAR(start);
22953         if (settings.promotion && !settings.demotion)
22954         {
22955             assert ((start + Align (size (start))) ==
22956                     heap_segment_plan_allocated(ephemeral_heap_segment));
22957         }
22958
22959         heap_segment_allocated(ephemeral_heap_segment)=
22960             heap_segment_plan_allocated(ephemeral_heap_segment);
22961     }
22962 }
22963
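// Helper for building free lists during sweep: returns the address at which
// gaps stop being threaded into gen_number's free list; with promotion in
// effect the boundary is shifted by one generation.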
22964 uint8_t* gc_heap::generation_limit (int gen_number)
22965 {
22966     if (settings.promotion)
22967     {
22968         if (gen_number <= 1)
22969             return heap_segment_reserved (ephemeral_heap_segment);
22970         else
22971             return generation_allocation_start (generation_of ((gen_number - 2)));
22972     }
22973     else
22974     {
22975         if (gen_number <= 0)
22976             return heap_segment_reserved (ephemeral_heap_segment);
22977         else
22978             return generation_allocation_start (generation_of ((gen_number - 1)));
22979     }
22980 }
22981
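// Makes sure the end of the ephemeral segment has enough committed space to
// allocate the min-object-sized generation start gaps for the condemned
// generations.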
22982 BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number)
22983 {
22984     uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
22985     size_t size = Align (min_obj_size)*(condemned_gen_number+1);
22986     assert ((start + size) <=
22987             heap_segment_reserved (ephemeral_heap_segment));
22988     if ((start + size) >
22989         heap_segment_committed (ephemeral_heap_segment))
22990     {
22991         if (!grow_heap_segment (ephemeral_heap_segment, start + size))
22992         {
22993             return FALSE;
22994         }
22995     }
22996     return TRUE;
22997 }
22998
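// Carves an aligned chunk off the end of the ephemeral segment by bumping
// heap_segment_allocated; used below to materialize generation start objects
// after a sweep.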
22999 uint8_t* gc_heap::allocate_at_end (size_t size)
23000 {
23001     uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
23002     size = Align (size);
23003     uint8_t* result = start;
23004     // only called to allocate a min obj so can't overflow here.
23005     assert ((start + size) <=
23006             heap_segment_reserved (ephemeral_heap_segment));
23007     //ensure_gap_allocation took care of it
23008     assert ((start + size) <=
23009             heap_segment_committed (ephemeral_heap_segment));
23010     heap_segment_allocated (ephemeral_heap_segment) += size;
23011     return result;
23012 }
23013
23014
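// Sweep: walk the brick table of the condemned generations, rebuild each brick
// entry from its plug tree (threading the gaps between surviving plugs onto the
// appropriate free lists), and re-create the generation start objects at the
// end of the ephemeral segment.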
23015 void gc_heap::make_free_lists (int condemned_gen_number)
23016 {
23017 #ifdef TIME_GC
23018     unsigned start;
23019     unsigned finish;
23020     start = GetCycleCount32();
23021 #endif //TIME_GC
23022
23023     //Promotion has to happen in sweep case.
23024     assert (settings.promotion);
23025
23026     generation* condemned_gen = generation_of (condemned_gen_number);
23027     uint8_t* start_address = generation_allocation_start (condemned_gen);
23028
23029     size_t  current_brick = brick_of (start_address);
23030     heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
23031
23032     PREFIX_ASSUME(current_heap_segment != NULL);
23033
23034     uint8_t*  end_address = heap_segment_allocated (current_heap_segment);
23035     size_t  end_brick = brick_of (end_address-1);
23036     make_free_args args;
23037     args.free_list_gen_number = min (max_generation, 1 + condemned_gen_number);
23038     args.current_gen_limit = (((condemned_gen_number == max_generation)) ?
23039                               MAX_PTR :
23040                               (generation_limit (args.free_list_gen_number)));
23041     args.free_list_gen = generation_of (args.free_list_gen_number);
23042     args.highest_plug = 0;
23043
23044     if ((start_address < end_address) ||
23045         (condemned_gen_number == max_generation))
23046     {
23047         while (1)
23048         {
23049             if ((current_brick > end_brick))
23050             {
23051                 if (args.current_gen_limit == MAX_PTR)
23052                 {
23053                     //We had an empty segment
23054                     //need to allocate the generation start
23055
23056                     generation* gen = generation_of (max_generation);
23057
23058                     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
23059
23060                     PREFIX_ASSUME(start_seg != NULL);
23061
23062                     uint8_t* gap = heap_segment_mem (start_seg);
23063
23064                     generation_allocation_start (gen) = gap;
23065                     heap_segment_allocated (start_seg) = gap + Align (min_obj_size);
23066                     make_unused_array (gap, Align (min_obj_size));
23067                     reset_allocation_pointers (gen, gap);
23068                     dprintf (3, ("Start segment empty, fixing generation start of %d to: %Ix",
23069                                  max_generation, (size_t)gap));
23070                     args.current_gen_limit = generation_limit (args.free_list_gen_number);
23071                 }
23072                 if (heap_segment_next_rw (current_heap_segment))
23073                 {
23074                     current_heap_segment = heap_segment_next_rw (current_heap_segment);
23075                     current_brick = brick_of (heap_segment_mem (current_heap_segment));
23076                     end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
23077
23078                     continue;
23079                 }
23080                 else
23081                 {
23082                     break;
23083                 }
23084             }
23085             {
23086                 int brick_entry =  brick_table [ current_brick ];
23087                 if ((brick_entry >= 0))
23088                 {
23089                     make_free_list_in_brick (brick_address (current_brick) + brick_entry-1, &args);
23090                     dprintf(3,("Fixing brick entry %Ix to %Ix",
23091                                current_brick, (size_t)args.highest_plug));
23092                     set_brick (current_brick,
23093                                (args.highest_plug - brick_address (current_brick)));
23094                 }
23095                 else
23096                 {
23097                     if ((brick_entry > -32768))
23098                     {
23099
23100 #ifdef _DEBUG
23101                         ptrdiff_t offset = brick_of (args.highest_plug) - current_brick;
23102                         if ((brick_entry != -32767) && (! ((offset == brick_entry))))
23103                         {
23104                             assert ((brick_entry == -1));
23105                         }
23106 #endif //_DEBUG
23107                         //init to -1 for faster find_first_object
23108                         set_brick (current_brick, -1);
23109                     }
23110                 }
23111             }
23112             current_brick++;
23113         }
23114     }
23115     {
23116         int bottom_gen = 0;
23117         args.free_list_gen_number--;
23118         while (args.free_list_gen_number >= bottom_gen)
23119         {
23120             uint8_t*  gap = 0;
23121             generation* gen2 = generation_of (args.free_list_gen_number);
23122             gap = allocate_at_end (Align(min_obj_size));
23123             generation_allocation_start (gen2) = gap;
23124             reset_allocation_pointers (gen2, gap);
23125             dprintf(3,("Fixing generation start of %d to: %Ix",
23126                        args.free_list_gen_number, (size_t)gap));
23127             PREFIX_ASSUME(gap != NULL);
23128             make_unused_array (gap, Align (min_obj_size));
23129
23130             args.free_list_gen_number--;
23131         }
23132
23133         //reset the allocated size
23134         uint8_t* start2 = generation_allocation_start (youngest_generation);
23135         alloc_allocated = start2 + Align (size (start2));
23136     }
23137
23138 #ifdef TIME_GC
23139     finish = GetCycleCount32();
23140     sweep_time = finish - start;
23141 #endif //TIME_GC
23142 }
23143
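// In-order walk of the plug tree for one brick: the gap in front of each plug
// is threaded onto the current free-list generation, crossing generation
// boundaries (and planting the new generation starts) as the walk passes
// current_gen_limit.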
23144 void gc_heap::make_free_list_in_brick (uint8_t* tree, make_free_args* args)
23145 {
23146     assert ((tree != NULL));
23147     {
23148         int  right_node = node_right_child (tree);
23149         int left_node = node_left_child (tree);
23150         args->highest_plug = 0;
23151         if (! (0 == tree))
23152         {
23153             if (! (0 == left_node))
23154             {
23155                 make_free_list_in_brick (tree + left_node, args);
23156
23157             }
23158             {
23159                 uint8_t*  plug = tree;
23160                 size_t  gap_size = node_gap_size (tree);
23161                 uint8_t*  gap = (plug - gap_size);
23162                 dprintf (3,("Making free list %Ix len %d in %d",
23163                 //dprintf (3,("F: %Ix len %Ix in %d",
23164                         (size_t)gap, gap_size, args->free_list_gen_number));
23165                 args->highest_plug = tree;
23166 #ifdef SHORT_PLUGS
23167                 if (is_plug_padded (plug))
23168                 {
23169                     dprintf (3, ("%Ix padded", plug));
23170                     clear_plug_padded (plug);
23171                 }
23172 #endif //SHORT_PLUGS
23173             gen_crossing:
23174                 {
23175                     if ((args->current_gen_limit == MAX_PTR) ||
23176                         ((plug >= args->current_gen_limit) &&
23177                          ephemeral_pointer_p (plug)))
23178                     {
23179                         dprintf(3,(" Crossing Generation boundary at %Ix",
23180                                (size_t)args->current_gen_limit));
23181                         if (!(args->current_gen_limit == MAX_PTR))
23182                         {
23183                             args->free_list_gen_number--;
23184                             args->free_list_gen = generation_of (args->free_list_gen_number);
23185                         }
23186                         dprintf(3,( " Fixing generation start of %d to: %Ix",
23187                                 args->free_list_gen_number, (size_t)gap));
23188
23189                         reset_allocation_pointers (args->free_list_gen, gap);
23190                         args->current_gen_limit = generation_limit (args->free_list_gen_number);
23191
23192                         if ((gap_size >= (2*Align (min_obj_size))))
23193                         {
23194                             dprintf(3,(" Splitting the gap in two %Id left",
23195                                    gap_size));
23196                             make_unused_array (gap, Align(min_obj_size));
23197                             gap_size = (gap_size - Align(min_obj_size));
23198                             gap = (gap + Align(min_obj_size));
23199                         }
23200                         else
23201                         {
23202                             make_unused_array (gap, gap_size);
23203                             gap_size = 0;
23204                         }
23205                         goto gen_crossing;
23206                     }
23207                 }
23208
23209                 thread_gap (gap, gap_size, args->free_list_gen);
23210                 add_gen_free (args->free_list_gen->gen_num, gap_size);
23211             }
23212             if (! (0 == right_node))
23213             {
23214                 make_free_list_in_brick (tree + right_node, args);
23215             }
23216         }
23217     }
23218 }
23219
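// Threads a gap of free space onto the given generation: the gap becomes a free
// object, and if it is at least min_free_list bytes it is put on the generation's
// allocator free list; otherwise it is only counted as free_obj_space (space that
// cannot be handed out for allocations).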
23220 void gc_heap::thread_gap (uint8_t* gap_start, size_t size, generation*  gen)
23221 {
23222     assert (generation_allocation_start (gen));
23223     if ((size > 0))
23224     {
23225         if ((gen->gen_num == 0) && (size > CLR_SIZE))
23226         {
23227             gen0_big_free_spaces += size;
23228         }
23229
23230         assert ((heap_segment_rw (generation_start_segment (gen))!=
23231                  ephemeral_heap_segment) ||
23232                 (gap_start > generation_allocation_start (gen)));
23233         // The beginning of a segment gap is not aligned
23234         assert (size >= Align (min_obj_size));
23235         make_unused_array (gap_start, size, 
23236                           (!settings.concurrent && (gen != youngest_generation)),
23237                           (gen->gen_num == max_generation));
23238         dprintf (3, ("fr: [%Ix, %Ix[", (size_t)gap_start, (size_t)gap_start+size));
23239
23240         if ((size >= min_free_list))
23241         {
23242             generation_free_list_space (gen) += size;
23243             generation_allocator (gen)->thread_item (gap_start, size);
23244         }
23245         else
23246         {
23247             generation_free_obj_space (gen) += size;
23248         }
23249     }
23250 }
23251
23252 void gc_heap::loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen)
23253 {
23254     assert (generation_allocation_start (gen));
23255     if (size >= min_free_list)
23256     {
23257         generation_free_list_space (gen) += size;
23258         generation_allocator (gen)->thread_item_front (gap_start, size);
23259     }
23260 }
23261
23262 void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL resetp)
23263 {
23264     dprintf (3, ("Making unused array [%Ix, %Ix[",
23265         (size_t)x, (size_t)(x+size)));
23266     assert (size >= Align (min_obj_size));
23267
23268 //#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
23269 //    check_batch_mark_array_bits (x, x+size);
23270 //#endif //VERIFY_HEAP && BACKGROUND_GC
23271
23272     if (resetp)
23273         reset_memory (x, size);
23274
23275     ((CObjectHeader*)x)->SetFree(size);
23276
23277 #ifdef BIT64
23278
23279 #if BIGENDIAN
23280 #error "This won't work on big endian platforms"
23281 #endif
23282
23283     size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;
23284
23285     if (size_as_object < size)
23286     {
23287         //
23288         // If the size is more than 4GB, we need to create multiple objects because
23289         // Array::m_NumComponents is uint32_t and the high 32 bits of the unused array
23290         // size are ignored in the regular object size computation.
23291         //
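        // Illustrative example (hypothetical sizes): for a ~6GB gap, size_as_object keeps
        // only the low 32 bits of the payload size, so the first free object covers roughly
        // 2GB; the loop below carves the remaining ~4GB into chunks just under UINT32_MAX
        // bytes, each leaving room for at least a min-sized object, and the final SetFree
        // covers whatever is left.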
23292         uint8_t * tmp = x + size_as_object;
23293         size_t remaining_size = size - size_as_object;
23294
23295         while (remaining_size > UINT32_MAX)
23296         {
23297             // Make sure that there will be at least Align(min_obj_size) left
23298             size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) 
23299                 - Align (min_obj_size, get_alignment_constant (FALSE));
23300
23301             ((CObjectHeader*)tmp)->SetFree(current_size);
23302
23303             remaining_size -= current_size;
23304             tmp += current_size;
23305         }
23306
23307         ((CObjectHeader*)tmp)->SetFree(remaining_size);
23308     }
23309 #endif
23310
23311     if (clearp)
23312         clear_card_for_addresses (x, x + Align(size));
23313 }
23314
23315 // Clear memory set by make_unused_array.
23316 void gc_heap::clear_unused_array (uint8_t* x, size_t size)
23317 {
23318     // Also clear the sync block
23319     *(((PTR_PTR)x)-1) = 0;
23320
23321     ((CObjectHeader*)x)->UnsetFree();
23322
23323 #ifdef BIT64
23324
23325 #if BIGENDIAN
23326 #error "This won't work on big endian platforms"
23327 #endif
23328
23329     // The memory could have been cleared in the meantime. We have to mirror the algorithm
23330     // from make_unused_array since we cannot depend on the object sizes in memory.
23331     size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;
23332
23333     if (size_as_object < size)
23334     {
23335         uint8_t * tmp = x + size_as_object;
23336         size_t remaining_size = size - size_as_object;
23337
23338         while (remaining_size > UINT32_MAX)
23339         {
23340             size_t current_size = UINT32_MAX - get_alignment_constant (FALSE) 
23341                 - Align (min_obj_size, get_alignment_constant (FALSE));
23342
23343             ((CObjectHeader*)tmp)->UnsetFree();
23344
23345             remaining_size -= current_size;
23346             tmp += current_size;
23347         }
23348
23349         ((CObjectHeader*)tmp)->UnsetFree();
23350     }
23351 #else
23352     UNREFERENCED_PARAMETER(size);
23353 #endif
23354 }
23355
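// Searches the plug tree rooted at 'tree' for the plug that governs old_address:
// the rightmost plug at or below old_address, falling back to the closest lower
// candidate recorded on the way down when the descent ends on a higher plug.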
23356 inline
23357 uint8_t* tree_search (uint8_t* tree, uint8_t* old_address)
23358 {
23359     uint8_t* candidate = 0;
23360     int cn;
23361     while (1)
23362     {
23363         if (tree < old_address)
23364         {
23365             if ((cn = node_right_child (tree)) != 0)
23366             {
23367                 assert (candidate < tree);
23368                 candidate = tree;
23369                 tree = tree + cn;
23370                 Prefetch (tree - 8);
23371                 continue;
23372             }
23373             else
23374                 break;
23375         }
23376         else if (tree > old_address)
23377         {
23378             if ((cn = node_left_child (tree)) != 0)
23379             {
23380                 tree = tree + cn;
23381                 Prefetch (tree - 8);
23382                 continue;
23383             }
23384             else
23385                 break;
23386         } else
23387             break;
23388     }
23389     if (tree <= old_address)
23390         return tree;
23391     else if (candidate)
23392         return candidate;
23393     else
23394         return tree;
23395 }
23396
23397 #ifdef FEATURE_BASICFREEZE
23398 bool gc_heap::frozen_object_p (Object* obj)
23399 {
23400     heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)obj, FALSE);
23401     _ASSERTE(pSegment);
23402
23403     return heap_segment_read_only_p(pSegment);
23404 }
23405 #endif // FEATURE_BASICFREEZE
23406
23407 #ifdef FEATURE_REDHAWK
23408 // TODO: this was added on RH; we have not done perf runs to see whether this is the right
23409 // thing to do for other versions of the CLR.
23410 inline
23411 #endif // FEATURE_REDHAWK
23412 void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
23413 {
23414     uint8_t* old_address = *pold_address;
23415     if (!((old_address >= gc_low) && (old_address < gc_high)))
23416 #ifdef MULTIPLE_HEAPS
23417     {
23418         UNREFERENCED_PARAMETER(thread);
23419         if (old_address == 0)
23420             return;
23421         gc_heap* hp = heap_of (old_address);
23422         if ((hp == this) ||
23423             !((old_address >= hp->gc_low) && (old_address < hp->gc_high)))
23424             return;
23425     }
23426 #else //MULTIPLE_HEAPS
23427         return;
23428 #endif //MULTIPLE_HEAPS
23429     // delta translates old_address into address_gc (old_address);
23430     size_t  brick = brick_of (old_address);
23431     int    brick_entry =  brick_table [ brick ];
23432     uint8_t*  new_address = old_address;
23433     if (! ((brick_entry == 0)))
23434     {
23435     retry:
23436         {
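            // A negative brick entry is an offset back to the brick whose plug tree
            // covers this address; follow the chain until we reach a brick with a tree root.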
23437             while (brick_entry < 0)
23438             {
23439                 brick = (brick + brick_entry);
23440                 brick_entry =  brick_table [ brick ];
23441             }
23442             uint8_t* old_loc = old_address;
23443
23444             uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
23445                                       old_loc);
23446             if ((node <= old_loc))
23447                 new_address = (old_address + node_relocation_distance (node));
23448             else
23449             {
23450                 if (node_left_p (node))
23451                 {
23452                     dprintf(3,(" L: %Ix", (size_t)node));
23453                     new_address = (old_address +
23454                                    (node_relocation_distance (node) +
23455                                     node_gap_size (node)));
23456                 }
23457                 else
23458                 {
23459                     brick = brick - 1;
23460                     brick_entry =  brick_table [ brick ];
23461                     goto retry;
23462                 }
23463             }
23464         }
23465
23466         *pold_address = new_address;
23467         return;
23468     }
23469
23470 #ifdef FEATURE_LOH_COMPACTION
23471     if (loh_compacted_p
23472 #ifdef FEATURE_BASICFREEZE
23473         && !frozen_object_p((Object*)old_address)
23474 #endif // FEATURE_BASICFREEZE
23475         )
23476     {
23477         *pold_address = old_address + loh_node_relocation_distance (old_address);
23478     }
23479     else
23480 #endif //FEATURE_LOH_COMPACTION
23481     {
23482         *pold_address = new_address;
23483     }
23484 }
23485
23486 inline void 
23487 gc_heap::check_class_object_demotion (uint8_t* obj)
23488 {
23489 #ifdef COLLECTIBLE_CLASS
23490     if (is_collectible(obj))
23491     {
23492         check_class_object_demotion_internal (obj);
23493     }
23494 #else
23495     UNREFERENCED_PARAMETER(obj);
23496 #endif //COLLECTIBLE_CLASS
23497 }
23498
23499 #ifdef COLLECTIBLE_CLASS
23500 NOINLINE void 
23501 gc_heap::check_class_object_demotion_internal (uint8_t* obj)
23502 {
23503     if (settings.demotion)
23504     {
23505 #ifdef MULTIPLE_HEAPS
23506         // We set the card without checking the demotion range because at this point
23507         // the handle that points to the loader allocator object may or may not have
23508         // been relocated by other GC threads. 
23509         set_card (card_of (obj));
23510 #else
23511         THREAD_FROM_HEAP;
23512         uint8_t* class_obj = get_class_object (obj);
23513         dprintf (3, ("%Ix: got classobj %Ix", obj, class_obj));
23514         uint8_t* temp_class_obj = class_obj;
23515         uint8_t** temp = &temp_class_obj;
23516         relocate_address (temp THREAD_NUMBER_ARG);
23517
23518         check_demotion_helper (temp, obj);
23519 #endif //MULTIPLE_HEAPS
23520     }
23521 }
23522
23523 #endif //COLLECTIBLE_CLASS
23524
23525 inline void
23526 gc_heap::check_demotion_helper (uint8_t** pval, uint8_t* parent_obj)
23527 {
23528     // detect if we are demoting an object
23529     if ((*pval < demotion_high) &&
23530         (*pval >= demotion_low))
23531     {
23532         dprintf(3, ("setting card %Ix:%Ix",
23533                     card_of((uint8_t*)pval),
23534                     (size_t)pval));
23535
23536         set_card (card_of (parent_obj));
23537     }
23538 #ifdef MULTIPLE_HEAPS
23539     else if (settings.demotion)
23540     {
23541         dprintf (4, ("Demotion active, computing heap_of object"));
23542         gc_heap* hp = heap_of (*pval);
23543         if ((*pval < hp->demotion_high) &&
23544             (*pval >= hp->demotion_low))
23545         {
23546             dprintf(3, ("setting card %Ix:%Ix",
23547                         card_of((uint8_t*)pval),
23548                         (size_t)pval));
23549
23550             set_card (card_of (parent_obj));
23551         }
23552     }
23553 #endif //MULTIPLE_HEAPS
23554 }
23555
23556 inline void
23557 gc_heap::reloc_survivor_helper (uint8_t** pval)
23558 {
23559     THREAD_FROM_HEAP;
23560     relocate_address (pval THREAD_NUMBER_ARG);
23561
23562     check_demotion_helper (pval, (uint8_t*)pval);
23563 }
23564
23565 inline void
23566 gc_heap::relocate_obj_helper (uint8_t* x, size_t s)
23567 {
23568     THREAD_FROM_HEAP;
23569     if (contain_pointers (x))
23570     {
23571         dprintf (3, ("$%Ix$", (size_t)x));
23572
23573         go_through_object_nostart (method_table(x), x, s, pval,
23574                             {
23575                                 uint8_t* child = *pval;
23576                                 reloc_survivor_helper (pval);
23577                                 if (child)
23578                                 {
23579                                     dprintf (3, ("%Ix->%Ix->%Ix", (uint8_t*)pval, child, *pval));
23580                                 }
23581                             });
23582
23583     }
23584     check_class_object_demotion (x);
23585 }
23586
23587 inline 
23588 void gc_heap::reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc)
23589 {
23590     THREAD_FROM_HEAP;
23591
23592     uint8_t* old_val = (address_to_reloc ? *address_to_reloc : 0);
23593     relocate_address (address_to_reloc THREAD_NUMBER_ARG);
23594     if (address_to_reloc)
23595     {
23596         dprintf (3, ("SR %Ix: %Ix->%Ix", (uint8_t*)address_to_reloc, old_val, *address_to_reloc));
23597     }
23598
23599     //check_demotion_helper (current_saved_info_to_relocate, (uint8_t*)pval);
23600     uint8_t* relocated_addr = *address_to_reloc;
23601     if ((relocated_addr < demotion_high) &&
23602         (relocated_addr >= demotion_low))
23603     {
23604         dprintf (3, ("set card for location %Ix(%Ix)",
23605                     (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));
23606
23607         set_card (card_of ((uint8_t*)address_to_set_card));
23608     }
23609 #ifdef MULTIPLE_HEAPS
23610     else if (settings.demotion)
23611     {
23612         gc_heap* hp = heap_of (relocated_addr);
23613         if ((relocated_addr < hp->demotion_high) &&
23614             (relocated_addr >= hp->demotion_low))
23615         {
23616             dprintf (3, ("%Ix on h%d, set card for location %Ix(%Ix)",
23617                         relocated_addr, hp->heap_number, (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));
23618
23619             set_card (card_of ((uint8_t*)address_to_set_card));
23620         }
23621     }
23622 #endif //MULTIPLE_HEAPS
23623 }
23624
23625 void gc_heap::relocate_pre_plug_info (mark* pinned_plug_entry)
23626 {
23627     THREAD_FROM_HEAP;
23628     uint8_t* plug = pinned_plug (pinned_plug_entry);
23629     uint8_t* pre_plug_start = plug - sizeof (plug_and_gap);
23630     // Note that we need to add one ptr size here otherwise we may not be able to find the relocated
23631     // address. Consider this scenario: 
23632     // gen1 start | 3-ptr sized NP | PP
23633     // 0          | 0x18           | 0x30
23634     // If we are asking for the reloc address of 0x10 we will AV in relocate_address because
23635     // the first plug we saw in the brick is 0x18 which means 0x10 will cause us to go back a brick
23636     // which is 0, and then we'll AV in tree_search when we try to do node_right_child (tree). 
23637     pre_plug_start += sizeof (uint8_t*);
23638     uint8_t** old_address = &pre_plug_start;
23639
23640     uint8_t* old_val = (old_address ? *old_address : 0);
23641     relocate_address (old_address THREAD_NUMBER_ARG);
23642     if (old_address)
23643     {
23644         dprintf (3, ("PreR %Ix: %Ix->%Ix, set reloc: %Ix", 
23645             (uint8_t*)old_address, old_val, *old_address, (pre_plug_start - sizeof (uint8_t*))));
23646     }
23647
23648     pinned_plug_entry->set_pre_plug_info_reloc_start (pre_plug_start - sizeof (uint8_t*));
23649 }
23650
23651 inline
23652 void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned)
23653 {
23654     THREAD_FROM_HEAP;
23655     uint8_t* plug = pinned_plug (pinned_plug_entry);
23656
23657     if (!is_pinned)
23658     {
23659         //// Temporary - we just want to make sure we are doing things right when padding is needed.
23660         //if ((x + s) < plug)
23661         //{
23662         //    dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix", 
23663         //        x, (x + s), (plug- (x + s)), plug));
23664         //    GCToOSInterface::DebugBreak();
23665         //}
23666
23667         relocate_pre_plug_info (pinned_plug_entry);
23668     }
23669
23670     verify_pins_with_post_plug_info("after relocate_pre_plug_info");
23671
23672     uint8_t* saved_plug_info_start = 0;
23673     uint8_t** saved_info_to_relocate = 0;
23674
23675     if (is_pinned)
23676     {
23677         saved_plug_info_start = (uint8_t*)(pinned_plug_entry->get_post_plug_info_start());
23678         saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
23679     }
23680     else
23681     {
23682         saved_plug_info_start = (plug - sizeof (plug_and_gap));
23683         saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
23684     }
23685     
23686     uint8_t** current_saved_info_to_relocate = 0;
23687     uint8_t* child = 0;
23688
23689     dprintf (3, ("x: %Ix, pp: %Ix, end: %Ix", x, plug, end));
23690
23691     if (contain_pointers (x))
23692     {
23693         dprintf (3,("$%Ix$", (size_t)x));
23694
23695         go_through_object_nostart (method_table(x), x, s, pval,
23696         {
23697             dprintf (3, ("obj %Ix, member: %Ix->%Ix", x, (uint8_t*)pval, *pval));
23698
23699             if ((uint8_t*)pval >= end)
23700             {
23701                 current_saved_info_to_relocate = saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**);
23702                 child = *current_saved_info_to_relocate;
23703                 reloc_ref_in_shortened_obj (pval, current_saved_info_to_relocate);
23704                 dprintf (3, ("last part: R-%Ix(saved: %Ix)->%Ix ->%Ix",
23705                     (uint8_t*)pval, current_saved_info_to_relocate, child, *current_saved_info_to_relocate));
23706             }
23707             else
23708             {
23709                 reloc_survivor_helper (pval);
23710             }
23711         });
23712     }
23713
23714     check_class_object_demotion (x);
23715 }
23716
23717 void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end)
23718 {
23719     uint8_t*  x = plug;
23720     while (x < plug_end)
23721     {
23722         size_t s = size (x);
23723         uint8_t* next_obj = x + Align (s);
23724         Prefetch (next_obj);
23725         relocate_obj_helper (x, s);
23726         assert (s > 0);
23727         x = next_obj;
23728     }
23729 }
23730
23731 // If the heap was expanded, we do not handle it right now since we are not saving the new reloc info.
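// Checks that, for each pinned plug with saved post-plug info, the gap size and
// relocation data currently in the heap still match what was saved at plan time.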
23732 void gc_heap::verify_pins_with_post_plug_info (const char* msg)
23733 {
23734 #if defined  (_DEBUG) && defined (VERIFY_HEAP)
23735     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
23736     {
23737         if (!verify_pinned_queue_p)
23738             return;
23739
23740         if (settings.heap_expansion)
23741             return;
23742
23743         for (size_t i = 0; i < mark_stack_tos; i++)
23744         {
23745             mark& m = mark_stack_array[i];
23746
23747             mark* pinned_plug_entry = pinned_plug_of(i);
23748
23749             if (pinned_plug_entry->has_post_plug_info() && 
23750                 pinned_plug_entry->post_short_p() && 
23751                 (pinned_plug_entry->saved_post_plug_debug.gap != 1))
23752             {
23753                 uint8_t* next_obj = pinned_plug_entry->get_post_plug_info_start() + sizeof (plug_and_gap);
23754                 // object after pin
23755                 dprintf (3, ("OFP: %Ix, G: %Ix, R: %Ix, LC: %d, RC: %d", 
23756                     next_obj, node_gap_size (next_obj), node_relocation_distance (next_obj),
23757                     (int)node_left_child (next_obj), (int)node_right_child (next_obj)));
23758
23759                 size_t* post_plug_debug = (size_t*)(&m.saved_post_plug_debug);
23760
23761                 if (node_gap_size (next_obj) != *post_plug_debug)
23762                 {
23763                     dprintf (3, ("obj: %Ix gap should be %Ix but it is %Ix", 
23764                         next_obj, *post_plug_debug, (size_t)(node_gap_size (next_obj))));
23765                     FATAL_GC_ERROR();
23766                 }
23767                 post_plug_debug++;
23768                 // can't do node_relocation_distance here as it clears the left bit.
23769                 //if (node_relocation_distance (next_obj) != *post_plug_debug)
23770                 if (*((size_t*)(next_obj - 3 * sizeof (size_t))) != *post_plug_debug)
23771                 {
23772                     dprintf (3, ("obj: %Ix reloc should be %Ix but it is %Ix", 
23773                         next_obj, *post_plug_debug, (size_t)(node_relocation_distance (next_obj))));
23774                     FATAL_GC_ERROR();
23775                 }
23776                 if (node_left_child (next_obj) > 0)
23777                 {
23778                     dprintf (3, ("obj: %Ix, vLC: %d\n", next_obj, (int)(node_left_child (next_obj))));
23779                     FATAL_GC_ERROR();
23780                 }
23781             }
23782         }
23783
23784         dprintf (3, ("%s verified", msg));
23785     }
23786 #else // _DEBUG && VERIFY_HEAP
23787     UNREFERENCED_PARAMETER(msg);
23788 #endif // _DEBUG && VERIFY_HEAP
23789 }
23790
23791 #ifdef COLLECTIBLE_CLASS
23792 // We don't want to burn another ptr-sized slot in pinned plugs to record this, so just
23793 // set the card unconditionally for collectible objects if we are demoting.
23794 inline void
23795 gc_heap::unconditional_set_card_collectible (uint8_t* obj)
23796 {
23797     if (settings.demotion)
23798     {
23799         set_card (card_of (obj));
23800     }
23801 }
23802 #endif //COLLECTIBLE_CLASS
23803
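// Relocates references in a plug shortened by a pinned plug: the last
// sizeof (gap_reloc_pair) bytes of the plug were overwritten with saved gap/reloc
// info, so references falling in that tail are relocated from the copy saved in
// the pinned plug entry instead of from the heap.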
23804 void gc_heap::relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry)
23805 {
23806     uint8_t*  x = plug;
23807     uint8_t* p_plug = pinned_plug (pinned_plug_entry);
23808     BOOL is_pinned = (plug == p_plug);
23809     BOOL check_short_obj_p = (is_pinned ? pinned_plug_entry->post_short_p() : pinned_plug_entry->pre_short_p());
23810
23811     plug_end += sizeof (gap_reloc_pair);
23812
23813     //dprintf (3, ("%s %Ix is shortened, and last object %s overwritten", (is_pinned ? "PP" : "NP"), plug, (check_short_obj_p ? "is" : "is not")));
23814     dprintf (3, ("%s %Ix-%Ix short, LO: %s OW", (is_pinned ? "PP" : "NP"), plug, plug_end, (check_short_obj_p ? "is" : "is not")));
23815
23816     verify_pins_with_post_plug_info("begin reloc short surv");
23817
23818     while (x < plug_end)
23819     {
23820         if (check_short_obj_p && ((plug_end - x) < min_pre_pin_obj_size))
23821         {
23822             dprintf (3, ("last obj %Ix is short", x));
23823
23824             if (is_pinned)
23825             {
23826 #ifdef COLLECTIBLE_CLASS
23827                 if (pinned_plug_entry->post_short_collectible_p())
23828                     unconditional_set_card_collectible (x);
23829 #endif //COLLECTIBLE_CLASS
23830
23831                 // Relocate the saved references based on bits set.
23832                 uint8_t** saved_plug_info_start = (uint8_t**)(pinned_plug_entry->get_post_plug_info_start());
23833                 uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
23834                 for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
23835                 {
23836                     if (pinned_plug_entry->post_short_bit_p (i))
23837                     {
23838                         reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
23839                     }
23840                 }
23841             }
23842             else
23843             {
23844 #ifdef COLLECTIBLE_CLASS
23845                 if (pinned_plug_entry->pre_short_collectible_p())
23846                     unconditional_set_card_collectible (x);
23847 #endif //COLLECTIBLE_CLASS
23848
23849                 relocate_pre_plug_info (pinned_plug_entry);
23850
23851                 // Relocate the saved references based on bits set.
23852                 uint8_t** saved_plug_info_start = (uint8_t**)(p_plug - sizeof (plug_and_gap));
23853                 uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
23854                 for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
23855                 {
23856                     if (pinned_plug_entry->pre_short_bit_p (i))
23857                     {
23858                         reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
23859                     }
23860                 }
23861             }
23862
23863             break;
23864         }
23865
23866         size_t s = size (x);
23867         uint8_t* next_obj = x + Align (s);
23868         Prefetch (next_obj);
23869
23870         if (next_obj >= plug_end) 
23871         {
23872             dprintf (3, ("object %Ix is at the end of the plug %Ix->%Ix", 
23873                 next_obj, plug, plug_end));
23874
23875             verify_pins_with_post_plug_info("before reloc short obj");
23876
23877             relocate_shortened_obj_helper (x, s, (x + Align (s) - sizeof (plug_and_gap)), pinned_plug_entry, is_pinned);
23878         }
23879         else
23880         {
23881             relocate_obj_helper (x, s);
23882         }
23883
23884         assert (s > 0);
23885         x = next_obj;
23886     }
23887
23888     verify_pins_with_post_plug_info("end reloc short surv");
23889 }
23890
23891 void gc_heap::relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
23892                                           BOOL check_last_object_p, 
23893                                           mark* pinned_plug_entry)
23894 {
23895     //dprintf(3,("Relocating pointers in Plug [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
23896     dprintf (3,("RP: [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
23897
23898     if (check_last_object_p)
23899     {
23900         relocate_shortened_survivor_helper (plug, plug_end, pinned_plug_entry);
23901     }
23902     else
23903     {
23904         relocate_survivor_helper (plug, plug_end);
23905     }
23906 }
23907
23908 void gc_heap::relocate_survivors_in_brick (uint8_t* tree, relocate_args* args)
23909 {
23910     assert ((tree != NULL));
23911
23912     dprintf (3, ("tree: %Ix, args->last_plug: %Ix, left: %Ix, right: %Ix, gap(t): %Ix",
23913         tree, args->last_plug, 
23914         (tree + node_left_child (tree)),
23915         (tree + node_right_child (tree)),
23916         node_gap_size (tree)));
23917
23918     if (node_left_child (tree))
23919     {
23920         relocate_survivors_in_brick (tree + node_left_child (tree), args);
23921     }
23922     {
23923         uint8_t*  plug = tree;
23924         BOOL   has_post_plug_info_p = FALSE;
23925         BOOL   has_pre_plug_info_p = FALSE;
23926
23927         if (tree == oldest_pinned_plug)
23928         {
23929             args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
23930                                                                &has_post_plug_info_p);
23931             assert (tree == pinned_plug (args->pinned_plug_entry));
23932
23933             dprintf (3, ("tree is the oldest pin: %Ix", tree));
23934         }
23935         if (args->last_plug)
23936         {
23937             size_t  gap_size = node_gap_size (tree);
23938             uint8_t*  gap = (plug - gap_size);
23939             dprintf (3, ("tree: %Ix, gap: %Ix (%Ix)", tree, gap, gap_size));
23940             assert (gap_size >= Align (min_obj_size));
23941             uint8_t*  last_plug_end = gap;
23942
23943             BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
23944
23945             {
23946                 relocate_survivors_in_plug (args->last_plug, last_plug_end, check_last_object_p, args->pinned_plug_entry);
23947             }
23948         }
23949         else
23950         {
23951             assert (!has_pre_plug_info_p);
23952         }
23953
23954         args->last_plug = plug;
23955         args->is_shortened = has_post_plug_info_p;
23956         if (has_post_plug_info_p)
23957         {
23958             dprintf (3, ("setting %Ix as shortened", plug));
23959         }
23960         dprintf (3, ("last_plug: %Ix(shortened: %d)", plug, (args->is_shortened ? 1 : 0)));
23961     }
23962     if (node_right_child (tree))
23963     {
23964         relocate_survivors_in_brick (tree + node_right_child (tree), args);
23965     }
23966 }
23967
23968 inline
23969 void gc_heap::update_oldest_pinned_plug()
23970 {
23971     oldest_pinned_plug = (pinned_plug_que_empty_p() ? 0 : pinned_plug (oldest_pin()));
23972 }
23973
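// Walks every brick of every segment in the condemned generation and relocates the
// references inside each surviving plug, using the per-brick plug trees built during
// the plan phase.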
23974 void gc_heap::relocate_survivors (int condemned_gen_number,
23975                                   uint8_t* first_condemned_address)
23976 {
23977     generation* condemned_gen = generation_of (condemned_gen_number);
23978     uint8_t*  start_address = first_condemned_address;
23979     size_t  current_brick = brick_of (start_address);
23980     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
23981
23982     PREFIX_ASSUME(current_heap_segment != NULL);
23983
23984     uint8_t*  end_address = 0;
23985
23986     reset_pinned_queue_bos();
23987     update_oldest_pinned_plug();
23988     
23989     end_address = heap_segment_allocated (current_heap_segment);
23990
23991     size_t  end_brick = brick_of (end_address - 1);
23992     relocate_args args;
23993     args.low = gc_low;
23994     args.high = gc_high;
23995     args.is_shortened = FALSE;
23996     args.pinned_plug_entry = 0;
23997     args.last_plug = 0;
23998     while (1)
23999     {
24000         if (current_brick > end_brick)
24001         {
24002             if (args.last_plug)
24003             {
24004                 {
24005                     assert (!(args.is_shortened));
24006                     relocate_survivors_in_plug (args.last_plug,
24007                                                 heap_segment_allocated (current_heap_segment),
24008                                                 args.is_shortened, 
24009                                                 args.pinned_plug_entry);
24010                 }
24011
24012                 args.last_plug = 0;
24013             }
24014
24015             if (heap_segment_next_rw (current_heap_segment))
24016             {
24017                 current_heap_segment = heap_segment_next_rw (current_heap_segment);
24018                 current_brick = brick_of (heap_segment_mem (current_heap_segment));
24019                 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24020                 continue;
24021             }
24022             else
24023             {
24024                 break;
24025             }
24026         }
24027         {
24028             int brick_entry =  brick_table [ current_brick ];
24029
24030             if (brick_entry >= 0)
24031             {
24032                 relocate_survivors_in_brick (brick_address (current_brick) +
24033                                              brick_entry -1,
24034                                              &args);
24035             }
24036         }
24037         current_brick++;
24038     }
24039 }
24040
24041 void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
24042 {
24043     if (check_last_object_p)
24044     {
24045         size += sizeof (gap_reloc_pair);
24046         mark* entry = args->pinned_plug_entry;
24047
24048         if (args->is_shortened)
24049         {
24050             assert (entry->has_post_plug_info());
24051             entry->swap_post_plug_and_saved_for_profiler();
24052         }
24053         else
24054         {
24055             assert (entry->has_pre_plug_info());
24056             entry->swap_pre_plug_and_saved_for_profiler();
24057         }
24058     }
24059
24060     ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
24061     STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
24062     ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
24063
24064     (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false);
24065
24066     if (check_last_object_p)
24067     {
24068         mark* entry = args->pinned_plug_entry;
24069
24070         if (args->is_shortened)
24071         {
24072             entry->swap_post_plug_and_saved_for_profiler();
24073         }
24074         else
24075         {
24076             entry->swap_pre_plug_and_saved_for_profiler();
24077         }
24078     }
24079 }
24080
24081 void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
24082 {
24083     assert ((tree != NULL));
24084     if (node_left_child (tree))
24085     {
24086         walk_relocation_in_brick (tree + node_left_child (tree), args);
24087     }
24088
24089     uint8_t*  plug = tree;
24090     BOOL   has_pre_plug_info_p = FALSE;
24091     BOOL   has_post_plug_info_p = FALSE;
24092
24093     if (tree == oldest_pinned_plug)
24094     {
24095         args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24096                                                            &has_post_plug_info_p);
24097         assert (tree == pinned_plug (args->pinned_plug_entry));
24098     }
24099
24100     if (args->last_plug != 0)
24101     {
24102         size_t gap_size = node_gap_size (tree);
24103         uint8_t*  gap = (plug - gap_size);
24104         uint8_t*  last_plug_end = gap;
24105         size_t last_plug_size = (last_plug_end - args->last_plug);
24106         dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", 
24107             tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));
24108         
24109         BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
24110         if (!check_last_object_p)
24111         {
24112             assert (last_plug_size >= Align (min_obj_size));
24113         }
24114
24115         walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
24116     }
24117     else
24118     {
24119         assert (!has_pre_plug_info_p);
24120     }
24121
24122     dprintf (3, ("set args last plug to plug: %Ix", plug));
24123     args->last_plug = plug;
24124     args->is_shortened = has_post_plug_info_p;
24125
24126     if (node_right_child (tree))
24127     {
24128         walk_relocation_in_brick (tree + node_right_child (tree), args);
24129     }
24130 }
24131
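// Performs the same brick/plug walk as relocate_survivors, but instead of updating
// references it reports each plug (with its relocation distance, or zero when not
// compacting) to the profiler callback fn.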
24132 void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn)
24133 {
24134     generation* condemned_gen = generation_of (settings.condemned_generation);
24135     uint8_t*  start_address = generation_allocation_start (condemned_gen);
24136     size_t  current_brick = brick_of (start_address);
24137     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
24138
24139     PREFIX_ASSUME(current_heap_segment != NULL);
24140
24141     reset_pinned_queue_bos();
24142     update_oldest_pinned_plug();
24143     size_t end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24144     walk_relocate_args args;
24145     args.is_shortened = FALSE;
24146     args.pinned_plug_entry = 0;
24147     args.last_plug = 0;
24148     args.profiling_context = profiling_context;
24149     args.fn = fn;
24150
24151     while (1)
24152     {
24153         if (current_brick > end_brick)
24154         {
24155             if (args.last_plug)
24156             {
24157                 walk_plug (args.last_plug, 
24158                            (heap_segment_allocated (current_heap_segment) - args.last_plug), 
24159                            args.is_shortened,
24160                            &args);
24161                 args.last_plug = 0;
24162             }
24163             if (heap_segment_next_rw (current_heap_segment))
24164             {
24165                 current_heap_segment = heap_segment_next_rw (current_heap_segment);
24166                 current_brick = brick_of (heap_segment_mem (current_heap_segment));
24167                 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24168                 continue;
24169             }
24170             else
24171             {
24172                 break;
24173             }
24174         }
24175         {
24176             int brick_entry =  brick_table [ current_brick ];
24177             if (brick_entry >= 0)
24178             {
24179                 walk_relocation_in_brick (brick_address (current_brick) +
24180                                           brick_entry - 1,
24181                                           &args);
24182             }
24183         }
24184         current_brick++;
24185     }
24186 }
24187
24188 void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type)
24189 {
24190     if (type == walk_for_gc)
24191         walk_survivors_relocation (context, fn);
24192 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24193     else if (type == walk_for_bgc)
24194         walk_survivors_for_bgc (context, fn);
24195 #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
24196     else if (type == walk_for_loh)
24197         walk_survivors_for_loh (context, fn);
24198     else
24199         assert (!"unknown type!");
24200 }
24201
24202 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24203 void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
24204 {
24205     // This should only be called for BGCs
24206     assert(settings.concurrent);
24207
24208     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
24209
24210     BOOL small_object_segments = TRUE;
24211     int align_const = get_alignment_constant (small_object_segments);
24212
24213     while (1)
24214     {
24215         if (seg == 0)
24216         {
24217             if (small_object_segments)
24218             {
24219                 //switch to large segment
24220                 small_object_segments = FALSE;
24221
24222                 align_const = get_alignment_constant (small_object_segments);
24223                 seg = heap_segment_rw (generation_start_segment (large_object_generation));
24224
24225                 PREFIX_ASSUME(seg != NULL);
24226
24227                 continue;
24228             }
24229             else 
24230                 break;
24231         }
24232
24233         uint8_t* o = heap_segment_mem (seg);
24234         uint8_t* end = heap_segment_allocated (seg);
24235
24236         while (o < end)
24237         {
24238             if (method_table(o) == g_gc_pFreeObjectMethodTable)
24239             {
24240                 o += Align (size (o), align_const);
24241                 continue;
24242             }
24243
24244             // It's survived. Make a fake plug, starting at o,
24245             // and send the event
24246
24247             uint8_t* plug_start = o;
24248
24249             while (method_table(o) != g_gc_pFreeObjectMethodTable)
24250             {
24251                 o += Align (size (o), align_const);
24252                 if (o >= end)
24253                 {
24254                     break;
24255                 }
24256             }
24257                 
24258             uint8_t* plug_end = o;
24259
24260             fn (plug_start, 
24261                 plug_end,
24262                 0,              // Reloc distance == 0 as this is non-compacting
24263                 profiling_context,
24264                 false,          // Non-compacting
24265                 true);          // BGC
24266         }
24267
24268         seg = heap_segment_next (seg);
24269     }
24270 }
24271 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24272
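// The relocate phase updates every reference to its post-compaction address using the
// relocation distances recorded in the plug trees: thread roots, background GC roots,
// cross-generation cards, large objects, survivors in the condemned generation,
// finalization data and the handle table.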
24273 void gc_heap::relocate_phase (int condemned_gen_number,
24274                               uint8_t* first_condemned_address)
24275 {
24276     ScanContext sc;
24277     sc.thread_number = heap_number;
24278     sc.promotion = FALSE;
24279     sc.concurrent = FALSE;
24280
24281
24282 #ifdef TIME_GC
24283         unsigned start;
24284         unsigned finish;
24285         start = GetCycleCount32();
24286 #endif //TIME_GC
24287
24288 //  %type%  category = quote (relocate);
24289     dprintf (2,("---- Relocate phase -----"));
24290
24291 #ifdef MULTIPLE_HEAPS
24292     //join all threads to make sure they are synchronized
24293     dprintf(3, ("Joining after end of plan"));
24294     gc_t_join.join(this, gc_join_begin_relocate_phase);
24295     if (gc_t_join.joined())
24296 #endif //MULTIPLE_HEAPS
24297
24298     {
24299 #ifdef MULTIPLE_HEAPS
24300
24301         //join all threads to make sure they are synchronized
24302         dprintf(3, ("Restarting for relocation"));
24303         gc_t_join.restart();
24304 #endif //MULTIPLE_HEAPS
24305     }
24306
24307     dprintf(3,("Relocating roots"));
24308     GCScan::GcScanRoots(GCHeap::Relocate,
24309                             condemned_gen_number, max_generation, &sc);
24310
24311     verify_pins_with_post_plug_info("after reloc stack");
24312
24313 #ifdef BACKGROUND_GC
24314     if (recursive_gc_sync::background_running_p())
24315     {
24316         scan_background_roots (GCHeap::Relocate, heap_number, &sc);
24317     }
24318 #endif //BACKGROUND_GC
24319
24320     if (condemned_gen_number != max_generation)
24321     {
24322         dprintf(3,("Relocating cross generation pointers"));
24323         mark_through_cards_for_segments (&gc_heap::relocate_address, TRUE);
24324         verify_pins_with_post_plug_info("after reloc cards");
24325     }
24326     if (condemned_gen_number != max_generation)
24327     {
24328         dprintf(3,("Relocating cross generation pointers for large objects"));
24329         mark_through_cards_for_large_objects (&gc_heap::relocate_address, TRUE);
24330     }
24331     else
24332     {
24333 #ifdef FEATURE_LOH_COMPACTION
24334         if (loh_compacted_p)
24335         {
24336             assert (settings.condemned_generation == max_generation);
24337             relocate_in_loh_compact();
24338         }
24339         else
24340 #endif //FEATURE_LOH_COMPACTION
24341         {
24342             relocate_in_large_objects ();
24343         }
24344     }
24345     {
24346         dprintf(3,("Relocating survivors"));
24347         relocate_survivors (condemned_gen_number,
24348                             first_condemned_address);
24349     }
24350
24351 #ifdef FEATURE_PREMORTEM_FINALIZATION
24352         dprintf(3,("Relocating finalization data"));
24353         finalize_queue->RelocateFinalizationData (condemned_gen_number,
24354                                                        __this);
24355 #endif // FEATURE_PREMORTEM_FINALIZATION
24356
24357
24358 // MTHTS
24359     {
24360         dprintf(3,("Relocating handle table"));
24361         GCScan::GcScanHandles(GCHeap::Relocate,
24362                                   condemned_gen_number, max_generation, &sc);
24363     }
24364
24365 #ifdef MULTIPLE_HEAPS
24366     //join all threads to make sure they are synchronized
24367     dprintf(3, ("Joining after end of relocation"));
24368     gc_t_join.join(this, gc_join_relocate_phase_done);
24369
24370 #endif //MULTIPLE_HEAPS
24371
24372 #ifdef TIME_GC
24373         finish = GetCycleCount32();
24374         reloc_time = finish - start;
24375 #endif //TIME_GC
24376
24377     dprintf(2,( "---- End of Relocate phase ----"));
24378 }
24379
24380 // This compares to see if tree is the current pinned plug and returns info
24381 // for this pinned plug. Also advances the pinned queue if that's the case.
24382 //
24383 // We don't change the values of the plug info if tree is not the same as 
24384 // the current pinned plug - the caller is responsible for setting the right
24385 // values to begin with.
24386 //
24387 // POPO TODO: We are keeping this temporarily because it is also used by realloc,
24388 // which passes FALSE for deque_p; change it to use the same optimization
24389 // as relocate. This is less essential since realloc is already a slow path.
24390 mark* gc_heap::get_next_pinned_entry (uint8_t* tree,
24391                                       BOOL* has_pre_plug_info_p, 
24392                                       BOOL* has_post_plug_info_p,
24393                                       BOOL deque_p)
24394 {
24395     if (!pinned_plug_que_empty_p())
24396     {
24397         mark* oldest_entry = oldest_pin();
24398         uint8_t* oldest_plug = pinned_plug (oldest_entry);
24399         if (tree == oldest_plug)
24400         {
24401             *has_pre_plug_info_p =  oldest_entry->has_pre_plug_info();
24402             *has_post_plug_info_p = oldest_entry->has_post_plug_info();
24403
24404             if (deque_p)
24405             {
24406                 deque_pinned_plug();
24407             }
24408
24409             dprintf (3, ("found a pinned plug %Ix, pre: %d, post: %d", 
24410                 tree, 
24411                 (*has_pre_plug_info_p ? 1 : 0),
24412                 (*has_post_plug_info_p ? 1 : 0)));
24413
24414             return oldest_entry;
24415         }
24416     }
24417
24418     return NULL;
24419 }
24420
24421 // This also deques the oldest entry and updates the oldest pinned plug.
24422 mark* gc_heap::get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, 
24423                                         BOOL* has_post_plug_info_p)
24424 {
24425     mark* oldest_entry = oldest_pin();
24426     *has_pre_plug_info_p =  oldest_entry->has_pre_plug_info();
24427     *has_post_plug_info_p = oldest_entry->has_post_plug_info();
24428
24429     deque_pinned_plug();
24430     update_oldest_pinned_plug();
24431     return oldest_entry;
24432 }
24433
24434 inline
24435 void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
24436 {
24437     if (copy_cards_p)
24438         copy_cards_for_addresses (dest, src, len);
24439     else
24440         clear_card_for_addresses (dest, dest + len);
24441 }
24442
24443 // POPO TODO: We should actually just recover the artificially made gaps here, because when we copy
24444 // we always copy the earlier plugs first, which means we won't need the gap sizes anymore. This way
24445 // we won't need to individually recover each overwritten part of the plugs.
24446 inline
24447 void  gc_heap::gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
24448 {
24449     if (dest != src)
24450     {
24451 #ifdef BACKGROUND_GC
24452         if (current_c_gc_state == c_gc_state_marking) 
24453         {
24454             // TODO: consider changing this to copy a consecutive
24455             // region of the mark array instead.
24456             copy_mark_bits_for_addresses (dest, src, len);
24457         }
24458 #endif //BACKGROUND_GC
24459         //dprintf(3,(" Memcopy [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
24460         dprintf(3,(" mc: [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
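        // Copy shifted back by plug_skew so that the object header bytes stored just
        // before the first object's method table pointer travel with the plug.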
24461         memcopy (dest - plug_skew, src - plug_skew, len);
24462 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
24463         if (SoftwareWriteWatch::IsEnabledForGCHeap())
24464         {
24465             // The ranges [src - plug_skew .. src[ and [src + len - plug_skew .. src + len[ are ObjHeaders, which don't have GC
24466             // references, and are not relevant for write watch. The latter range actually corresponds to the ObjHeader for the
24467             // object at (src + len), so it can be ignored anyway.
24468             SoftwareWriteWatch::SetDirtyRegion(dest, len - plug_skew);
24469         }
24470 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
24471         copy_cards_range (dest, src, len, copy_cards_p);
24472     }
24473 }
24474
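// Copies a single plug to its planned location (plug + last_plug_relocation). It
// temporarily restores saved pre/post plug data around pinned plugs, re-creates
// unused arrays for any padding or realignment space, copies or clears cards for the
// destination, and keeps the brick table pointing at the last plug starting in each brick.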
24475 void gc_heap::compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args)
24476 {
24477     args->print();
24478     uint8_t* reloc_plug = plug + args->last_plug_relocation;
24479
24480     if (check_last_object_p)
24481     {
24482         size += sizeof (gap_reloc_pair);
24483         mark* entry = args->pinned_plug_entry;
24484
24485         if (args->is_shortened)
24486         {
24487             assert (entry->has_post_plug_info());
24488             entry->swap_post_plug_and_saved();
24489         }
24490         else
24491         {
24492             assert (entry->has_pre_plug_info());
24493             entry->swap_pre_plug_and_saved();
24494         }
24495     }
24496
24497     int  old_brick_entry =  brick_table [brick_of (plug)];
24498
24499     assert (node_relocation_distance (plug) == args->last_plug_relocation);
24500
24501 #ifdef FEATURE_STRUCTALIGN
24502     ptrdiff_t alignpad = node_alignpad(plug);
24503     if (alignpad)
24504     {
24505         make_unused_array (reloc_plug - alignpad, alignpad);
24506         if (brick_of (reloc_plug - alignpad) != brick_of (reloc_plug))
24507         {
24508             // The alignment padding is straddling one or more bricks;
24509             // it has to be the last "object" of its first brick.
24510             fix_brick_to_highest (reloc_plug - alignpad, reloc_plug);
24511         }
24512     }
24513 #else // FEATURE_STRUCTALIGN
24514     size_t unused_arr_size = 0; 
24515     BOOL  already_padded_p = FALSE;
24516 #ifdef SHORT_PLUGS
24517     if (is_plug_padded (plug))
24518     {
24519         already_padded_p = TRUE;
24520         clear_plug_padded (plug);
24521         unused_arr_size = Align (min_obj_size);
24522     }
24523 #endif //SHORT_PLUGS
24524     if (node_realigned (plug))
24525     {
24526         unused_arr_size += switch_alignment_size (already_padded_p);
24527     }
24528
24529     if (unused_arr_size != 0) 
24530     {
24531         make_unused_array (reloc_plug - unused_arr_size, unused_arr_size);
24532
24533         if (brick_of (reloc_plug - unused_arr_size) != brick_of (reloc_plug))
24534         {
24535             dprintf (3, ("fix B for padding: %Id: %Ix->%Ix", 
24536                 unused_arr_size, (reloc_plug - unused_arr_size), reloc_plug));
24537             // The alignment padding is straddling one or more bricks;
24538             // it has to be the last "object" of its first brick.
24539             fix_brick_to_highest (reloc_plug - unused_arr_size, reloc_plug);
24540         }
24541     }
24542 #endif // FEATURE_STRUCTALIGN
24543
24544 #ifdef SHORT_PLUGS
24545     if (is_plug_padded (plug))
24546     {
24547         make_unused_array (reloc_plug - Align (min_obj_size), Align (min_obj_size));
24548
24549         if (brick_of (reloc_plug - Align (min_obj_size)) != brick_of (reloc_plug))
24550         {
24551             // The alignment padding is straddling one or more bricks;
24552             // it has to be the last "object" of its first brick.
24553             fix_brick_to_highest (reloc_plug - Align (min_obj_size), reloc_plug);
24554         }
24555     }
24556 #endif //SHORT_PLUGS
24557
24558     gcmemcopy (reloc_plug, plug, size, args->copy_cards_p);
24559
24560     if (args->check_gennum_p)
24561     {
24562         int src_gennum = args->src_gennum;
24563         if (src_gennum == -1)
24564         {
24565             src_gennum = object_gennum (plug);
24566         }
24567
24568         int dest_gennum = object_gennum_plan (reloc_plug);
24569
24570         if (src_gennum < dest_gennum)
24571         {
24572             generation_allocation_size (generation_of (dest_gennum)) += size;
24573         }
24574     }
24575
24576     size_t current_reloc_brick = args->current_compacted_brick;
24577
24578     if (brick_of (reloc_plug) != current_reloc_brick)
24579     {
24580         dprintf (3, ("last reloc B: %Ix, current reloc B: %Ix", 
24581             current_reloc_brick, brick_of (reloc_plug)));
24582
24583         if (args->before_last_plug)
24584         {
24585             dprintf (3,(" fixing last brick %Ix to point to last plug %Ix(%Ix)",
24586                      current_reloc_brick,
24587                      args->before_last_plug, 
24588                      (args->before_last_plug - brick_address (current_reloc_brick))));
24589
24590             {
24591                 set_brick (current_reloc_brick,
24592                         args->before_last_plug - brick_address (current_reloc_brick));
24593             }
24594         }
24595         current_reloc_brick = brick_of (reloc_plug);
24596     }
24597     size_t end_brick = brick_of (reloc_plug + size-1);
24598     if (end_brick != current_reloc_brick)
24599     {
24600         // The plug is straddling one or more bricks
24601         // It has to be the last plug of its first brick
24602         dprintf (3,("plug spanning multiple bricks, fixing first brick %Ix to %Ix(%Ix)",
24603                  current_reloc_brick, (size_t)reloc_plug,
24604                  (reloc_plug - brick_address (current_reloc_brick))));
24605
24606         {
24607             set_brick (current_reloc_brick,
24608                     reloc_plug - brick_address (current_reloc_brick));
24609         }
24610         // update all intervening bricks
24611         size_t brick = current_reloc_brick + 1;
24612         dprintf (3,("setting intervening bricks %Ix->%Ix to -1",
24613             brick, (end_brick - 1)));
24614         while (brick < end_brick)
24615         {
24616             set_brick (brick, -1);
24617             brick++;
24618         }
24619         // encode the last brick offset as a plug address
24620         args->before_last_plug = brick_address (end_brick) -1;
24621         current_reloc_brick = end_brick;
24622         dprintf (3, ("setting before last to %Ix, last brick to %Ix",
24623             args->before_last_plug, current_reloc_brick));
24624     } 
24625     else
24626     {
24627         dprintf (3, ("still in the same brick: %Ix", end_brick));
24628         args->before_last_plug = reloc_plug;
24629     }
24630     args->current_compacted_brick = current_reloc_brick;
24631
24632     if (check_last_object_p)
24633     {
24634         mark* entry = args->pinned_plug_entry;
24635
24636         if (args->is_shortened)
24637         {
24638             entry->swap_post_plug_and_saved();
24639         }
24640         else
24641         {
24642             entry->swap_pre_plug_and_saved();
24643         }
24644     }
24645 }
24646
24647 void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args)
24648 {
24649     assert (tree != NULL);
24650     int   left_node = node_left_child (tree);
24651     int   right_node = node_right_child (tree);
24652     ptrdiff_t relocation = node_relocation_distance (tree);
24653
24654     args->print();
24655
24656     if (left_node)
24657     {
24658         dprintf (3, ("B: L: %d->%Ix", left_node, (tree + left_node)));
24659         compact_in_brick ((tree + left_node), args);
24660     }
24661
24662     uint8_t*  plug = tree;
24663     BOOL   has_pre_plug_info_p = FALSE;
24664     BOOL   has_post_plug_info_p = FALSE;
24665
24666     if (tree == oldest_pinned_plug)
24667     {
24668         args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24669                                                            &has_post_plug_info_p);
24670         assert (tree == pinned_plug (args->pinned_plug_entry));
24671     }
24672
24673     if (args->last_plug != 0)
24674     {
24675         size_t gap_size = node_gap_size (tree);
24676         uint8_t*  gap = (plug - gap_size);
24677         uint8_t*  last_plug_end = gap;
24678         size_t last_plug_size = (last_plug_end - args->last_plug);
24679         dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix", 
24680             tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));
24681         
24682         BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
24683         if (!check_last_object_p)
24684         {
24685             assert (last_plug_size >= Align (min_obj_size));
24686         }
24687
24688         compact_plug (args->last_plug, last_plug_size, check_last_object_p, args);
24689     }
24690     else
24691     {
24692         assert (!has_pre_plug_info_p);
24693     }
24694
24695     dprintf (3, ("set args last plug to plug: %Ix, reloc: %Ix", plug, relocation));
24696     args->last_plug = plug;
24697     args->last_plug_relocation = relocation;
24698     args->is_shortened = has_post_plug_info_p;
24699
24700     if (right_node)
24701     {
24702         dprintf (3, ("B: R: %d->%Ix", right_node, (tree + right_node)));
24703         compact_in_brick ((tree + right_node), args);
24704     }
24705 }
24706
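// After compaction, walk the pinned plug queue and have each entry recover the plug info
// (the saved pre/post plug bytes) it stashed away, then dequeue it.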
24707 void gc_heap::recover_saved_pinned_info()
24708 {
24709     reset_pinned_queue_bos();
24710
24711     while (!(pinned_plug_que_empty_p()))
24712     {
24713         mark* oldest_entry = oldest_pin();
24714         oldest_entry->recover_plug_info();
24715 #ifdef GC_CONFIG_DRIVEN
24716         if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info())
24717             record_interesting_data_point (idp_pre_and_post_pin);
24718         else if (oldest_entry->has_pre_plug_info())
24719             record_interesting_data_point (idp_pre_pin);
24720         else if (oldest_entry->has_post_plug_info())
24721             record_interesting_data_point (idp_post_pin);
24722 #endif //GC_CONFIG_DRIVEN
24723
24724         deque_pinned_plug();
24725     }
24726 }
24727
24728 void gc_heap::compact_phase (int condemned_gen_number,
24729                              uint8_t*  first_condemned_address,
24730                              BOOL clear_cards)
24731 {
24732 //  %type%  category = quote (compact);
24733 #ifdef TIME_GC
24734         unsigned start;
24735         unsigned finish;
24736         start = GetCycleCount32();
24737 #endif //TIME_GC
24738     generation*   condemned_gen = generation_of (condemned_gen_number);
24739     uint8_t*  start_address = first_condemned_address;
24740     size_t   current_brick = brick_of (start_address);
24741     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
24742
24743     PREFIX_ASSUME(current_heap_segment != NULL);
24744
24745     reset_pinned_queue_bos();
24746     update_oldest_pinned_plug();
24747
24748     BOOL reused_seg = expand_reused_seg_p();
24749     if (reused_seg)
24750     {
24751         for (int i = 1; i <= max_generation; i++)
24752         {
24753             generation_allocation_size (generation_of (i)) = 0;
24754         }
24755     }
24756
24757     uint8_t*  end_address = heap_segment_allocated (current_heap_segment);
24758
24759     size_t  end_brick = brick_of (end_address-1);
24760     compact_args args;
24761     args.last_plug = 0;
24762     args.before_last_plug = 0;
24763     args.current_compacted_brick = ~((size_t)1);
24764     args.is_shortened = FALSE;
24765     args.pinned_plug_entry = 0;
24766     args.copy_cards_p =  (condemned_gen_number >= 1) || !clear_cards;
24767     args.check_gennum_p = reused_seg;
24768     if (args.check_gennum_p)
24769     {
24770         args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
24771     }
24772
24773     dprintf (2,("---- Compact Phase: %Ix(%Ix)----", 
24774         first_condemned_address, brick_of (first_condemned_address)));
24775
24776 #ifdef MULTIPLE_HEAPS
24777     //restart
24778     if (gc_t_join.joined())
24779     {
24780 #endif //MULTIPLE_HEAPS
24781
24782 #ifdef MULTIPLE_HEAPS
24783         dprintf(3, ("Restarting for compaction"));
24784         gc_t_join.restart();
24785     }
24786 #endif //MULTIPLE_HEAPS
24787
24788     reset_pinned_queue_bos();
24789
24790 #ifdef FEATURE_LOH_COMPACTION
24791     if (loh_compacted_p)
24792     {
24793         compact_loh();
24794     }
24795 #endif //FEATURE_LOH_COMPACTION
24796
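    // Walk the brick table of the condemned range one segment at a time. Each brick entry
    // that is >= 0 points at the root of that brick's plug tree (brick_address + entry - 1),
    // which compact_in_brick traverses to relocate the plugs. Once current_brick passes
    // end_brick, the trailing plug of the segment is compacted and we move on to the next
    // segment.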
24797     if ((start_address < end_address) ||
24798         (condemned_gen_number == max_generation))
24799     {
24800         while (1)
24801         {
24802             if (current_brick > end_brick)
24803             {
24804                 if (args.last_plug != 0)
24805                 {
24806                     dprintf (3, ("compacting last plug: %Ix", args.last_plug));
24807                     compact_plug (args.last_plug,
24808                                   (heap_segment_allocated (current_heap_segment) - args.last_plug),
24809                                   args.is_shortened,
24810                                   &args);
24811                 }
24812
24813                 if (heap_segment_next_rw (current_heap_segment))
24814                 {
24815                     current_heap_segment = heap_segment_next_rw (current_heap_segment);
24816                     current_brick = brick_of (heap_segment_mem (current_heap_segment));
24817                     end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24818                     args.last_plug = 0;
24819                     if (args.check_gennum_p)
24820                     {
24821                         args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
24822                     }
24823                     continue;
24824                 }
24825                 else
24826                 {
24827                     if (args.before_last_plug !=0)
24828                     {
24829                         dprintf (3, ("Fixing last brick %Ix to point to plug %Ix",
24830                                     args.current_compacted_brick, (size_t)args.before_last_plug));
24831                         assert (args.current_compacted_brick != ~1u);
24832                         set_brick (args.current_compacted_brick,
24833                                    args.before_last_plug - brick_address (args.current_compacted_brick));
24834                     }
24835                     break;
24836                 }
24837             }
24838             {
24839                 int  brick_entry =  brick_table [ current_brick ];
24840                 dprintf (3, ("B: %Ix(%Ix)->%Ix", 
24841                     current_brick, (size_t)brick_entry, (brick_address (current_brick) + brick_entry - 1)));
24842
24843                 if (brick_entry >= 0)
24844                 {
24845                     compact_in_brick ((brick_address (current_brick) + brick_entry -1),
24846                                       &args);
24847
24848                 }
24849             }
24850             current_brick++;
24851         }
24852     }
24853
24854     recover_saved_pinned_info();
24855
24856 #ifdef TIME_GC
24857     finish = GetCycleCount32();
24858     compact_time = finish - start;
24859 #endif //TIME_GC
24860
24861     concurrent_print_time_delta ("compact end");
24862
24863     dprintf(2,("---- End of Compact phase ----"));
24864 }
24865
24866 #ifdef MULTIPLE_HEAPS
24867
24868 #ifdef _MSC_VER
24869 #pragma warning(push)
24870 #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
24871 #endif //_MSC_VER
24872 void gc_heap::gc_thread_stub (void* arg)
24873 {
24874     gc_heap* heap = (gc_heap*)arg;
24875     if (!gc_thread_no_affinitize_p)
24876     {
24877         GCThreadAffinity affinity;
24878         affinity.Group = GCThreadAffinity::None;
24879         affinity.Processor = GCThreadAffinity::None;
24880
24881         // We are about to set affinity for GC threads. It is a good place to set up NUMA and
24882         // CPU groups because the process mask, processor number, and group number are all
24883         // readily available.
24884         if (GCToOSInterface::CanEnableGCCPUGroups())
24885             set_thread_group_affinity_for_heap(heap->heap_number, &affinity);
24886         else
24887             set_thread_affinity_mask_for_heap(heap->heap_number, &affinity);
24888
24889         if (!GCToOSInterface::SetThreadAffinity(&affinity))
24890         {
24891             dprintf(1, ("Failed to set thread affinity for server GC thread"));
24892         }
24893     }
24894
24895     // server GC threads run at a higher priority than normal.
24896     GCToOSInterface::BoostThreadPriority();
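    // Presumably this alloca staggers each server GC thread's stack by a heap-dependent
    // amount so that the corresponding hot stack locations of different heaps don't land
    // in the same cache sets.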
24897     _alloca (256*heap->heap_number);
24898     heap->gc_thread_function();
24899 }
24900 #ifdef _MSC_VER
24901 #pragma warning(pop)
24902 #endif //_MSC_VER
24903
24904 #endif //MULTIPLE_HEAPS
24905
24906 #ifdef BACKGROUND_GC
24907
24908 #ifdef _MSC_VER
24909 #pragma warning(push)
24910 #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
24911 #endif //_MSC_VER
24912 void gc_heap::bgc_thread_stub (void* arg)
24913 {
24914     gc_heap* heap = (gc_heap*)arg;
24915     heap->bgc_thread = GCToEEInterface::GetThread();
24916     assert(heap->bgc_thread != nullptr);
24917     heap->bgc_thread_function();
24918 }
24919 #ifdef _MSC_VER
24920 #pragma warning(pop)
24921 #endif //_MSC_VER
24922
24923 #endif //BACKGROUND_GC
24924
24925 /*------------------ Background GC ----------------------------*/
24926
24927 #ifdef BACKGROUND_GC
24928
24929 void gc_heap::background_drain_mark_list (int thread)
24930 {
24931     UNREFERENCED_PARAMETER(thread);
24932
24933     size_t saved_c_mark_list_index = c_mark_list_index;
24934
24935     if (saved_c_mark_list_index)
24936     {
24937         concurrent_print_time_delta ("SML");
24938     }
24939     while (c_mark_list_index != 0)
24940     {
24941         size_t current_index = c_mark_list_index - 1;
24942         uint8_t* o = c_mark_list [current_index];
24943         background_mark_object (o THREAD_NUMBER_ARG);
24944         c_mark_list_index--;
24945     }
24946     if (saved_c_mark_list_index)
24947     {
24948
24949         concurrent_print_time_delta ("EML");
24950     }
24951
24952     fire_drain_mark_list_event (saved_c_mark_list_index);
24953 }
24954
24955
24956 // The background GC version of scan_dependent_handles (see that method for a more in-depth comment).
24957 #ifdef MULTIPLE_HEAPS
24958 // Since we only scan dependent handles while we are stopped we'll never interfere with FGCs scanning
24959 // them. So we can use the same static variables.
24960 void gc_heap::background_scan_dependent_handles (ScanContext *sc)
24961 {
24962     // Whenever we call this method there may have been preceding object promotions. So set
24963     // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
24964     // based on how the scanning proceeded).
24965     s_fUnscannedPromotions = TRUE;
24966
24967     // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
24968     // the state of this thread's portion of the dependent handle table. That's because promotions on other
24969     // threads could cause handle promotions to become necessary here. Even if there are definitely no more
24970     // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
24971     // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
24972     // as all the others or they'll get out of step).
24973     while (true)
24974     {
24975         // The various worker threads are all currently racing in this code. We need to work out if at least
24976         // one of them thinks it has work to do this cycle. Each thread needs to rescan its portion of the
24977         // dependent handle table when both of the following conditions apply:
24978         //  1) At least one (arbitrary) object might have been promoted since the last scan (because if this
24979         //     object happens to correspond to a primary in one of our handles we might potentially have to
24980         //     promote the associated secondary).
24981         //  2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
24982         //
24983         // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
24984         // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
24985         // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
24986         // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
24987         // follows below. Note that we can't read this outside of the join since on any iteration apart from
24988         // the first, threads will be racing between reading this value and completing their previous
24989         // iteration's table scan.
24990         //
24991         // The second condition is tracked by the dependent handle code itself on a per worker thread basis
24992         // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
24993         // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
24994         // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
24995         // we're safely joined.
24996         if (GCScan::GcDhUnpromotedHandlesExist(sc))
24997             s_fUnpromotedHandles = TRUE;
24998
24999         // Synchronize all the threads so we can read our state variables safely. The following shared
25000         // variable (indicating whether we should scan the tables or terminate the loop) will be set by a
25001         // single thread inside the join.
25002         bgc_t_join.join(this, gc_join_scan_dependent_handles);
25003         if (bgc_t_join.joined())
25004         {
25005             // We're synchronized so it's safe to read our shared state variables. We update another shared
25006             // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
25007             // the loop. We scan if there has been at least one object promotion since last time and at least
25008             // one thread has a dependent handle table with a potential handle promotion possible.
25009             s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
25010
25011             // Reset our shared state variables (ready to be set again on this scan or with a good initial
25012             // value for the next call if we're terminating the loop).
25013             s_fUnscannedPromotions = FALSE;
25014             s_fUnpromotedHandles = FALSE;
25015
25016             if (!s_fScanRequired)
25017             {
25018                 uint8_t* all_heaps_max = 0;
25019                 uint8_t* all_heaps_min = MAX_PTR;
25020                 int i;
25021                 for (i = 0; i < n_heaps; i++)
25022                 {
25023                     if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
25024                         all_heaps_max = g_heaps[i]->background_max_overflow_address;
25025                     if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
25026                         all_heaps_min = g_heaps[i]->background_min_overflow_address;
25027                 }
25028                 for (i = 0; i < n_heaps; i++)
25029                 {
25030                     g_heaps[i]->background_max_overflow_address = all_heaps_max;
25031                     g_heaps[i]->background_min_overflow_address = all_heaps_min;
25032                 }
25033             }
25034
25035             // Restart all the workers.
25036             dprintf(2, ("Starting all gc thread mark stack overflow processing"));
25037             bgc_t_join.restart();
25038         }
25039
25040         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
25041         // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
25042         // global flag indicating that at least one object promotion may have occurred (the usual comment
25043         // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
25044         // exit the method since we unconditionally set this variable on method entry anyway).
25045         if (background_process_mark_overflow (sc->concurrent))
25046             s_fUnscannedPromotions = TRUE;
25047
25048         // If we decided that no scan was required we can terminate the loop now.
25049         if (!s_fScanRequired)
25050             break;
25051
25052         // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
25053         // processed before we start scanning dependent handle tables (if overflows remain while we scan we
25054         // could miss noting the promotion of some primary objects).
25055         bgc_t_join.join(this, gc_join_rescan_dependent_handles);
25056         if (bgc_t_join.joined())
25057         {
25058             // Restart all the workers.
25059             dprintf(3, ("Starting all gc thread for dependent handle promotion"));
25060             bgc_t_join.restart();
25061         }
25062
25063         // If the portion of the dependent handle table managed by this worker has handles that could still be
25064         // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
25065         // could require a rescan of handles on this or other workers.
25066         if (GCScan::GcDhUnpromotedHandlesExist(sc))
25067             if (GCScan::GcDhReScan(sc))
25068                 s_fUnscannedPromotions = TRUE;
25069     }
25070 }
25071 #else
25072 void gc_heap::background_scan_dependent_handles (ScanContext *sc)
25073 {
25074     // Whenever we call this method there may have been preceding object promotions. So set
25075     // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
25076     // based on how the scanning proceeded).
25077     bool fUnscannedPromotions = true;
25078
25079     // Scan dependent handles repeatedly until there are no further promotions that can be made or we made a
25080     // scan without performing any new promotions.
25081     while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
25082     {
25083         // On each iteration of the loop start with the assumption that no further objects have been promoted.
25084         fUnscannedPromotions = false;
25085
25086         // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
25087         // being visible. If there was an overflow (background_process_mark_overflow returned true) then
25088         // additional objects now appear to be promoted and we should set the flag.
25089         if (background_process_mark_overflow (sc->concurrent))
25090             fUnscannedPromotions = true;
25091
25092         // Perform the scan and set the flag if any promotions resulted.
25093         if (GCScan::GcDhReScan (sc))
25094             fUnscannedPromotions = true;
25095     }
25096
25097     // Perform a last processing of any overflowed mark stack.
25098     background_process_mark_overflow (sc->concurrent);
25099 }
25100 #endif //MULTIPLE_HEAPS
25101
25102 void gc_heap::recover_bgc_settings()
25103 {
25104     if ((settings.condemned_generation < max_generation) && recursive_gc_sync::background_running_p())
25105     {
25106         dprintf (2, ("restoring bgc settings"));
25107         settings = saved_bgc_settings;
25108         GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
25109     }
25110 }
25111
25112 void gc_heap::allow_fgc()
25113 {
25114     assert (bgc_thread == GCToEEInterface::GetThread());
25115     bool bToggleGC = false;
25116
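    // If the EE has requested a suspension (e.g. for a foreground GC), briefly switch this
    // BGC thread to preemptive mode and back; that gives the pending suspension a chance to
    // proceed while the BGC thread is at a safe point.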
25117     if (g_fSuspensionPending > 0)
25118     {
25119         bToggleGC = GCToEEInterface::EnablePreemptiveGC();
25120         if (bToggleGC)
25121         {
25122             GCToEEInterface::DisablePreemptiveGC();
25123         }
25124     }
25125 }
25126
25127 BOOL gc_heap::should_commit_mark_array()
25128 {
25129     return (recursive_gc_sync::background_running_p() || (current_bgc_state == bgc_initialized));
25130 }
25131
25132 void gc_heap::clear_commit_flag()
25133 {
25134     generation* gen = generation_of (max_generation);
25135     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25136     while (1)
25137     {
25138         if (seg == 0)
25139         {
25140             if (gen != large_object_generation)
25141             {
25142                 gen = large_object_generation;
25143                 seg = heap_segment_in_range (generation_start_segment (gen));
25144             }
25145             else
25146             {
25147                 break;
25148             }
25149         }
25150
25151         if (seg->flags & heap_segment_flags_ma_committed)
25152         {
25153             seg->flags &= ~heap_segment_flags_ma_committed;
25154         }
25155
25156         if (seg->flags & heap_segment_flags_ma_pcommitted)
25157         {
25158             seg->flags &= ~heap_segment_flags_ma_pcommitted;
25159         }
25160
25161         seg = heap_segment_next (seg);
25162     }
25163 }
25164
25165 void gc_heap::clear_commit_flag_global()
25166 {
25167 #ifdef MULTIPLE_HEAPS
25168     for (int i = 0; i < n_heaps; i++)
25169     {
25170         g_heaps[i]->clear_commit_flag();
25171     }
25172 #else
25173     clear_commit_flag();
25174 #endif //MULTIPLE_HEAPS
25175 }
25176
25177 void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
25178 {
25179 #ifdef _DEBUG
25180     size_t  markw = mark_word_of (begin);
25181     size_t  markw_end = mark_word_of (end);
25182
25183     while (markw < markw_end)
25184     {
25185         if (mark_array_addr[markw])
25186         {
25187             dprintf  (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
25188                             markw, mark_array_addr[markw], mark_word_address (markw)));
25189             FATAL_GC_ERROR();
25190         }
25191         markw++;
25192     }
25193 #else // _DEBUG
25194     UNREFERENCED_PARAMETER(begin);
25195     UNREFERENCED_PARAMETER(end);
25196     UNREFERENCED_PARAMETER(mark_array_addr);
25197 #endif //_DEBUG
25198 }
25199
25200 void gc_heap::verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr)
25201 {
25202     verify_mark_array_cleared (heap_segment_mem (seg), heap_segment_reserved (seg), mark_array_addr);
25203 }
25204
25205 BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp, 
25206                                          heap_segment* seg,
25207                                          uint32_t* new_card_table,
25208                                          uint8_t* new_lowest_address)
25209 {
25210     UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced
25211
25212     uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25213     uint8_t* end = heap_segment_reserved (seg);
25214
25215     uint8_t* lowest = hp->background_saved_lowest_address;
25216     uint8_t* highest = hp->background_saved_highest_address;
25217
25218     uint8_t* commit_start = NULL;
25219     uint8_t* commit_end = NULL;
25220     size_t commit_flag = 0;
25221
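    // heap_segment_flags_ma_committed means the mark array covering the whole segment is
    // committed; heap_segment_flags_ma_pcommitted means only the part overlapping the saved
    // BGC range (background_saved_lowest_address..background_saved_highest_address) is
    // committed - see the two cases below.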
25222     if ((highest >= start) &&
25223         (lowest <= end))
25224     {
25225         if ((start >= lowest) && (end <= highest))
25226         {
25227             dprintf (GC_TABLE_LOG, ("completely in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
25228                                     start, end, lowest, highest));
25229             commit_flag = heap_segment_flags_ma_committed;
25230         }
25231         else
25232         {
25233             dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
25234                                     start, end, lowest, highest));
25235             commit_flag = heap_segment_flags_ma_pcommitted;
25236         }
25237
25238         commit_start = max (lowest, start);
25239         commit_end = min (highest, end);
25240
25241         if (!commit_mark_array_by_range (commit_start, commit_end, hp->mark_array))
25242         {
25243             return FALSE;
25244         }
25245
25246         if (new_card_table == 0)
25247         {
25248             new_card_table = g_gc_card_table;
25249         }
25250
25251         if (hp->card_table != new_card_table)
25252         {
25253             if (new_lowest_address == 0)
25254             {
25255                 new_lowest_address = g_gc_lowest_address;
25256             }
25257
25258             uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))];
25259             uint32_t* ma = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, new_lowest_address));
25260
25261             dprintf (GC_TABLE_LOG, ("table realloc-ed: %Ix->%Ix, MA: %Ix->%Ix", 
25262                                     hp->card_table, new_card_table,
25263                                     hp->mark_array, ma));
25264
25265             if (!commit_mark_array_by_range (commit_start, commit_end, ma))
25266             {
25267                 return FALSE;
25268             }
25269         }
25270
25271         seg->flags |= commit_flag;
25272     }
25273
25274     return TRUE;
25275 }
25276
25277 BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
25278 {
25279     size_t beg_word = mark_word_of (begin);
25280     size_t end_word = mark_word_of (align_on_mark_word (end));
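    // Round the commit range outward to page boundaries (align the start down, the end up)
    // so that every mark word covering [begin, end) lands on a committed page.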
25281     uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]);
25282     uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]);
25283     size_t size = (size_t)(commit_end - commit_start);
25284
25285 #ifdef SIMPLE_DPRINTF
25286     dprintf (GC_TABLE_LOG, ("range: %Ix->%Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), commit %Ix->%Ix(%Id)",
25287                             begin, end,
25288                             beg_word, end_word,
25289                             (end_word - beg_word) * sizeof (uint32_t),
25290                             &mark_array_addr[beg_word],
25291                             &mark_array_addr[end_word],
25292                             (size_t)(&mark_array_addr[end_word] - &mark_array_addr[beg_word]),
25293                             commit_start, commit_end,
25294                             size));
25295 #endif //SIMPLE_DPRINTF
25296
25297     if (GCToOSInterface::VirtualCommit (commit_start, size))
25298     {
25299         // We can only verify the mark array is cleared from begin to end; the first and the last
25300         // page aren't necessarily all cleared 'cause they could be used by other segments or
25301         // card bundles.
25302         verify_mark_array_cleared (begin, end, mark_array_addr);
25303         return TRUE;
25304     }
25305     else
25306     {
25307         dprintf (GC_TABLE_LOG, ("failed to commit %Id bytes", (end_word - beg_word) * sizeof (uint32_t)));
25308         return FALSE;
25309     }
25310 }
25311
25312 BOOL gc_heap::commit_mark_array_with_check (heap_segment* seg, uint32_t* new_mark_array_addr)
25313 {
25314     uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25315     uint8_t* end = heap_segment_reserved (seg);
25316
25317 #ifdef MULTIPLE_HEAPS
25318     uint8_t* lowest = heap_segment_heap (seg)->background_saved_lowest_address;
25319     uint8_t* highest = heap_segment_heap (seg)->background_saved_highest_address;
25320 #else
25321     uint8_t* lowest = background_saved_lowest_address;
25322     uint8_t* highest = background_saved_highest_address;
25323 #endif //MULTIPLE_HEAPS
25324
25325     if ((highest >= start) &&
25326         (lowest <= end))
25327     {
25328         start = max (lowest, start);
25329         end = min (highest, end);
25330         if (!commit_mark_array_by_range (start, end, new_mark_array_addr))
25331         {
25332             return FALSE;
25333         }
25334     }
25335
25336     return TRUE;
25337 }
25338
25339 BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr)
25340 {
25341     dprintf (GC_TABLE_LOG, ("seg: %Ix->%Ix; MA: %Ix",
25342         seg,
25343         heap_segment_reserved (seg),
25344         mark_array_addr));
25345     uint8_t* start = (heap_segment_read_only_p (seg) ? heap_segment_mem (seg) : (uint8_t*)seg);
25346
25347     return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr);
25348 }
25349
25350 BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
25351 {
25352     UNREFERENCED_PARAMETER(mark_array_addr);
25353
25354     dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix", 
25355                             lowest_address, highest_address, mark_array));
25356
25357     generation* gen = generation_of (max_generation);
25358     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25359     while (1)
25360     {
25361         if (seg == 0)
25362         {
25363             if (gen != large_object_generation)
25364             {
25365                 gen = large_object_generation;
25366                 seg = heap_segment_in_range (generation_start_segment (gen));
25367             }
25368             else
25369             {
25370                 break;
25371             }
25372         }
25373
25374         dprintf (GC_TABLE_LOG, ("seg: %Ix, flags: %Id", seg, seg->flags));
25375
25376         if (!(seg->flags & heap_segment_flags_ma_committed))
25377         {
25378             // ro segments could always be only partially in range, so we'd end up
25379             // calling this at the beginning of every BGC. We are not making this
25380             // more efficient right now - ro segments are currently only used by redhawk.
25381             if (heap_segment_read_only_p (seg))
25382             {
25383                 if ((heap_segment_mem (seg) >= lowest_address) && 
25384                     (heap_segment_reserved (seg) <= highest_address))
25385                 {
25386                     if (commit_mark_array_by_seg (seg, mark_array))
25387                     {
25388                         seg->flags |= heap_segment_flags_ma_committed;
25389                     }
25390                     else
25391                     {
25392                         return FALSE;
25393                     }
25394                 }
25395                 else
25396                 {
25397                     uint8_t* start = max (lowest_address, heap_segment_mem (seg));
25398                     uint8_t* end = min (highest_address, heap_segment_reserved (seg));
25399                     if (commit_mark_array_by_range (start, end, mark_array))
25400                     {
25401                         seg->flags |= heap_segment_flags_ma_pcommitted;
25402                     }
25403                     else
25404                     {
25405                         return FALSE;
25406                     }
25407                 }
25408             }
25409             else
25410             {
25411                 // Normal segments are by design completely in range, so just
25412                 // commit the whole mark array for each seg.
25413                 if (commit_mark_array_by_seg (seg, mark_array))
25414                 {
25415                     if (seg->flags & heap_segment_flags_ma_pcommitted)
25416                     {
25417                         seg->flags &= ~heap_segment_flags_ma_pcommitted;
25418                     }
25419                     seg->flags |= heap_segment_flags_ma_committed;
25420                 }
25421                 else
25422                 {
25423                     return FALSE;
25424                 }
25425             }
25426         }
25427
25428         seg = heap_segment_next (seg);
25429     }
25430
25431     return TRUE;
25432 }
25433
25434 // This function doesn't check the commit flag since it's for a new array -
25435 // the mark_array flag for these segments will remain the same.
25436 BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr)
25437 {
25438     dprintf (GC_TABLE_LOG, ("committing existing segs on MA %Ix", new_mark_array_addr));
25439     generation* gen = generation_of (max_generation);
25440     heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25441     while (1)
25442     {
25443         if (seg == 0)
25444         {
25445             if (gen != large_object_generation)
25446             {
25447                 gen = large_object_generation;
25448                 seg = heap_segment_in_range (generation_start_segment (gen));
25449             }
25450             else
25451             {
25452                 break;
25453             }
25454         }
25455
25456         if (!commit_mark_array_with_check (seg, new_mark_array_addr))
25457         {
25458             return FALSE;
25459         }
25460
25461         seg = heap_segment_next (seg);
25462     }
25463
25464 #ifdef MULTIPLE_HEAPS
25465     if (new_heap_segment)
25466     {
25467         if (!commit_mark_array_with_check (new_heap_segment, new_mark_array_addr))
25468         {
25469             return FALSE;
25470         }        
25471     }
25472 #endif //MULTIPLE_HEAPS
25473
25474     return TRUE;
25475 }
25476
25477 BOOL gc_heap::commit_new_mark_array_global (uint32_t* new_mark_array)
25478 {
25479 #ifdef MULTIPLE_HEAPS
25480     for (int i = 0; i < n_heaps; i++)
25481     {
25482         if (!g_heaps[i]->commit_new_mark_array (new_mark_array))
25483         {
25484             return FALSE;
25485         }
25486     }
25487 #else
25488     if (!commit_new_mark_array (new_mark_array))
25489     {
25490         return FALSE;
25491     }
25492 #endif //MULTIPLE_HEAPS
25493
25494     return TRUE;
25495 }
25496
25497 void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
25498 {
25499     // if BGC is disabled (the finalize watchdog does this at shutdown), the mark array could have
25500     // been set to NULL. 
25501     if (mark_array == NULL)
25502     {
25503         return;
25504     }
25505
25506     dprintf (GC_TABLE_LOG, ("decommitting seg %Ix(%Ix), MA: %Ix", seg, seg->flags, mark_array));
25507
25508     size_t flags = seg->flags;
25509
25510     if ((flags & heap_segment_flags_ma_committed) ||
25511         (flags & heap_segment_flags_ma_pcommitted))
25512     {
25513         uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25514         uint8_t* end = heap_segment_reserved (seg);
25515
25516         if (flags & heap_segment_flags_ma_pcommitted)
25517         {
25518             start = max (lowest_address, start);
25519             end = min (highest_address, end);
25520         }
25521
25522         size_t beg_word = mark_word_of (start);
25523         size_t end_word = mark_word_of (align_on_mark_word (end));
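        // Unlike commit_mark_array_by_range, round the range inward to page boundaries
        // (align the start up, the end down), presumably so that pages whose mark words may
        // also cover adjacent segments or card bundles are never decommitted.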
25524         uint8_t* decommit_start = align_on_page ((uint8_t*)&mark_array[beg_word]);
25525         uint8_t* decommit_end = align_lower_page ((uint8_t*)&mark_array[end_word]);
25526         size_t size = (size_t)(decommit_end - decommit_start);
25527
25528 #ifdef SIMPLE_DPRINTF
25529         dprintf (GC_TABLE_LOG, ("seg: %Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), decommit %Ix->%Ix(%Id)",
25530                                 seg,
25531                                 beg_word, end_word,
25532                                 (end_word - beg_word) * sizeof (uint32_t),
25533                                 &mark_array[beg_word],
25534                                 &mark_array[end_word],
25535                                 (size_t)(&mark_array[end_word] - &mark_array[beg_word]),
25536                                 decommit_start, decommit_end,
25537                                 size));
25538 #endif //SIMPLE_DPRINTF
25539         
25540         if (decommit_start < decommit_end)
25541         {
25542             if (!GCToOSInterface::VirtualDecommit (decommit_start, size))
25543             {
25544                 dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualDecommit on %Ix for %Id bytes failed", 
25545                                         decommit_start, size));
25546                 assert (!"decommit failed");
25547             }
25548         }
25549
25550         dprintf (GC_TABLE_LOG, ("decommitted [%Ix for address [%Ix", beg_word, seg));
25551     }
25552 }
25553
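// Rough shape of the background mark phase: after resetting write watch and restarting the
// EE, roots and handles are marked concurrently and the c_mark_list is drained while dirtied
// pages are revisited; the EE is then suspended again (bgc_suspend_EE) for the final,
// non-concurrent marking of stacks, handles, dependent handles, finalization and weak
// references, followed by sweep preparation at the end of this function.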
25554 void gc_heap::background_mark_phase ()
25555 {
25556     verify_mark_array_cleared();
25557
25558     ScanContext sc;
25559     sc.thread_number = heap_number;
25560     sc.promotion = TRUE;
25561     sc.concurrent = FALSE;
25562
25563     THREAD_FROM_HEAP;
25564     BOOL cooperative_mode = TRUE;
25565 #ifndef MULTIPLE_HEAPS
25566     const int thread = heap_number;
25567 #endif //!MULTIPLE_HEAPS
25568
25569     dprintf(2,("-(GC%d)BMark-", VolatileLoad(&settings.gc_index)));
25570
25571     assert (settings.concurrent);
25572
25573 #ifdef TIME_GC
25574     unsigned start;
25575     unsigned finish;
25576     start = GetCycleCount32();
25577 #endif //TIME_GC
25578
25579 #ifdef FFIND_OBJECT
25580     if (gen0_must_clear_bricks > 0)
25581         gen0_must_clear_bricks--;
25582 #endif //FFIND_OBJECT
25583
25584     background_soh_alloc_count = 0;
25585     background_loh_alloc_count = 0;
25586     bgc_overflow_count = 0;
25587
25588     bpromoted_bytes (heap_number) = 0;
25589     static uint32_t num_sizedrefs = 0;
25590
25591     background_min_overflow_address = MAX_PTR;
25592     background_max_overflow_address = 0;
25593     background_min_soh_overflow_address = MAX_PTR;
25594     background_max_soh_overflow_address = 0;
25595     processed_soh_overflow_p = FALSE;
25596
25597     {
25598         //set up the mark lists from g_mark_list
25599         assert (g_mark_list);
25600         mark_list = g_mark_list;
25601         //don't use the mark list for full gc
25602         //because multiple segments are more complex to handle and the list
25603         //is likely to overflow
25604         mark_list_end = &mark_list [0];
25605         mark_list_index = &mark_list [0];
25606
25607         c_mark_list_index = 0;
25608
25609         shigh = (uint8_t*) 0;
25610         slow  = MAX_PTR;
25611
25612         generation*   gen = generation_of (max_generation);
25613
25614         dprintf(3,("BGC: stack marking"));
25615         sc.concurrent = TRUE;
25616
25617         GCScan::GcScanRoots(background_promote_callback,
25618                                 max_generation, max_generation,
25619                                 &sc);
25620     }
25621
25622     {
25623         dprintf(3,("BGC: finalization marking"));
25624         finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0);
25625     }
25626
25627     size_t total_loh_size = generation_size (max_generation + 1);
25628     bgc_begin_loh_size = total_loh_size;
25629     bgc_alloc_spin_loh = 0;
25630     bgc_loh_size_increased = 0;
25631     bgc_loh_allocated_in_free = 0;
25632     size_t total_soh_size = generation_sizes (generation_of (max_generation));
25633
25634     dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
25635
25636     {
25637         //concurrent_print_time_delta ("copying stack roots");
25638         concurrent_print_time_delta ("CS");
25639
25640         FIRE_EVENT(BGC1stNonConEnd);
25641
25642         expanded_in_fgc = FALSE;
25643         saved_overflow_ephemeral_seg = 0;
25644         current_bgc_state = bgc_reset_ww;
25645
25646         // we don't need a join here - just whichever thread gets here
25647         // first can change the states and call restart_vm.
25648         // this is not true - we can't let the EE run when we are scanning stack.
25649         // since we now allow reset ww to run concurrently and have a join for it,
25650         // we can do restart ee on the 1st thread that got here. Make sure we handle the 
25651         // sizedref handles correctly.
25652 #ifdef MULTIPLE_HEAPS
25653         bgc_t_join.join(this, gc_join_restart_ee);
25654         if (bgc_t_join.joined())
25655 #endif //MULTIPLE_HEAPS
25656         {
25657 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25658             // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset
25659             // can be done while the runtime is suspended or after the runtime is restarted; the preference was to reset while
25660             // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below.
25661 #ifdef WRITE_WATCH
25662             concurrent_print_time_delta ("CRWW begin");
25663
25664 #ifdef MULTIPLE_HEAPS
25665             for (int i = 0; i < n_heaps; i++)
25666             {
25667                 g_heaps[i]->reset_write_watch (FALSE);
25668             }
25669 #else
25670             reset_write_watch (FALSE);
25671 #endif //MULTIPLE_HEAPS
25672
25673             concurrent_print_time_delta ("CRWW");
25674 #endif //WRITE_WATCH
25675 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25676
25677             num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
25678
25679             // this c_write is not really necessary because restart_vm
25680             // has an instruction that will flush the cpu cache (interlocked
25681             // or whatever) but we don't want to rely on that.
25682             dprintf (BGC_LOG, ("setting cm_in_progress"));
25683             c_write (cm_in_progress, TRUE);
25684
25685             //restart all threads, doing the marking from the array
25686             assert (dont_restart_ee_p);
25687             dont_restart_ee_p = FALSE;
25688
25689             restart_vm();
25690             GCToOSInterface::YieldThread (0);
25691 #ifdef MULTIPLE_HEAPS
25692             dprintf(3, ("Starting all gc threads for gc"));
25693             bgc_t_join.restart();
25694 #endif //MULTIPLE_HEAPS
25695         }
25696
25697 #ifdef MULTIPLE_HEAPS
25698         bgc_t_join.join(this, gc_join_after_reset);
25699         if (bgc_t_join.joined())
25700 #endif //MULTIPLE_HEAPS
25701         {
25702             disable_preemptive (true);
25703
25704 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25705             // When software write watch is enabled, resetting write watch is done while the runtime is suspended above. The
25706             // post-reset call to revisit_written_pages is only necessary for concurrent reset_write_watch, to discard dirtied
25707             // pages during the concurrent reset.
25708
25709 #ifdef WRITE_WATCH
25710             concurrent_print_time_delta ("CRWW begin");
25711
25712 #ifdef MULTIPLE_HEAPS
25713             for (int i = 0; i < n_heaps; i++)
25714             {
25715                 g_heaps[i]->reset_write_watch (TRUE);
25716             }
25717 #else
25718             reset_write_watch (TRUE);
25719 #endif //MULTIPLE_HEAPS
25720
25721             concurrent_print_time_delta ("CRWW");
25722 #endif //WRITE_WATCH
25723
25724 #ifdef MULTIPLE_HEAPS
25725             for (int i = 0; i < n_heaps; i++)
25726             {
25727                 g_heaps[i]->revisit_written_pages (TRUE, TRUE);
25728             }
25729 #else
25730             revisit_written_pages (TRUE, TRUE);
25731 #endif //MULTIPLE_HEAPS
25732
25733             concurrent_print_time_delta ("CRW");
25734 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25735
25736 #ifdef MULTIPLE_HEAPS
25737             for (int i = 0; i < n_heaps; i++)
25738             {
25739                 g_heaps[i]->current_bgc_state = bgc_mark_handles;
25740             }
25741 #else
25742             current_bgc_state = bgc_mark_handles;
25743 #endif //MULTIPLE_HEAPS
25744
25745             current_c_gc_state = c_gc_state_marking;
25746
25747             enable_preemptive ();
25748
25749 #ifdef MULTIPLE_HEAPS
25750             dprintf(3, ("Joining BGC threads after resetting writewatch"));
25751             bgc_t_join.restart();
25752 #endif //MULTIPLE_HEAPS
25753         }
25754
25755         disable_preemptive (true);
25756
25757         if (num_sizedrefs > 0)
25758         {
25759             GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc);
25760
25761             enable_preemptive ();
25762
25763 #ifdef MULTIPLE_HEAPS
25764             bgc_t_join.join(this, gc_join_scan_sizedref_done);
25765             if (bgc_t_join.joined())
25766             {
25767                 dprintf(3, ("Done with marking all sized refs. Starting all bgc thread for marking other strong roots"));
25768                 bgc_t_join.restart();
25769             }
25770 #endif //MULTIPLE_HEAPS
25771
25772             disable_preemptive (true);
25773         }
25774
25775         dprintf (3,("BGC: handle table marking"));
25776         GCScan::GcScanHandles(background_promote,
25777                                   max_generation, max_generation,
25778                                   &sc);
25779         //concurrent_print_time_delta ("concurrent marking handle table");
25780         concurrent_print_time_delta ("CRH");
25781
25782         current_bgc_state = bgc_mark_stack;
25783         dprintf (2,("concurrent draining mark list"));
25784         background_drain_mark_list (thread);
25785         //concurrent_print_time_delta ("concurrent marking stack roots");
25786         concurrent_print_time_delta ("CRS");
25787
25788         dprintf (2,("concurrent revisiting dirtied pages"));
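        // Revisiting the dirtied pages twice here while still concurrent appears intentional:
        // each pass shrinks the set of pages left dirty for the final non-concurrent revisit
        // done after the EE is suspended.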
25789         revisit_written_pages (TRUE);
25790         revisit_written_pages (TRUE);
25791         //concurrent_print_time_delta ("concurrent marking dirtied pages on LOH");
25792         concurrent_print_time_delta ("CRre");
25793
25794         enable_preemptive ();
25795
25796 #ifdef MULTIPLE_HEAPS
25797         bgc_t_join.join(this, gc_join_concurrent_overflow);
25798         if (bgc_t_join.joined())
25799         {
25800             uint8_t* all_heaps_max = 0;
25801             uint8_t* all_heaps_min = MAX_PTR;
25802             int i;
25803             for (i = 0; i < n_heaps; i++)
25804             {
25805                 dprintf (3, ("heap %d overflow max is %Ix, min is %Ix", 
25806                     i,
25807                     g_heaps[i]->background_max_overflow_address,
25808                     g_heaps[i]->background_min_overflow_address));
25809                 if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
25810                     all_heaps_max = g_heaps[i]->background_max_overflow_address;
25811                 if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
25812                     all_heaps_min = g_heaps[i]->background_min_overflow_address;
25813             }
25814             for (i = 0; i < n_heaps; i++)
25815             {
25816                 g_heaps[i]->background_max_overflow_address = all_heaps_max;
25817                 g_heaps[i]->background_min_overflow_address = all_heaps_min;
25818             }
25819             dprintf(3, ("Starting all bgc threads after updating the overflow info"));
25820             bgc_t_join.restart();
25821         }
25822 #endif //MULTIPLE_HEAPS
25823
25824         disable_preemptive (true);
25825
25826         dprintf (2, ("before CRov count: %d", bgc_overflow_count));
25827         bgc_overflow_count = 0;
25828         background_process_mark_overflow (TRUE);
25829         dprintf (2, ("after CRov count: %d", bgc_overflow_count));
25830         bgc_overflow_count = 0;
25831         //concurrent_print_time_delta ("concurrent processing mark overflow");
25832         concurrent_print_time_delta ("CRov");
25833
25834         // Stop all threads, crawl all stacks and revisit changed pages.
25835         FIRE_EVENT(BGC1stConEnd);
25836
25837         dprintf (2, ("Stopping the EE"));
25838
25839         enable_preemptive ();
25840
25841 #ifdef MULTIPLE_HEAPS
25842         bgc_t_join.join(this, gc_join_suspend_ee);
25843         if (bgc_t_join.joined())
25844         {
25845             bgc_threads_sync_event.Reset();
25846
25847             dprintf(3, ("Joining BGC threads for non concurrent final marking"));
25848             bgc_t_join.restart();
25849         }
25850 #endif //MULTIPLE_HEAPS
25851
25852         if (heap_number == 0)
25853         {
25854             enter_spin_lock (&gc_lock);
25855
25856             bgc_suspend_EE ();
25857             //suspend_EE ();
25858             bgc_threads_sync_event.Set();
25859         }
25860         else
25861         {
25862             bgc_threads_sync_event.Wait(INFINITE, FALSE);
25863             dprintf (2, ("bgc_threads_sync_event is signalled"));
25864         }
25865
25866         assert (settings.concurrent);
25867         assert (settings.condemned_generation == max_generation);
25868
25869         dprintf (2, ("clearing cm_in_progress"));
25870         c_write (cm_in_progress, FALSE);
25871
25872         bgc_alloc_lock->check();
25873
25874         current_bgc_state = bgc_final_marking;
25875
25876         //concurrent_print_time_delta ("concurrent marking ended");
25877         concurrent_print_time_delta ("CR");
25878
25879         FIRE_EVENT(BGC2ndNonConBegin);
25880
25881         mark_absorb_new_alloc();
25882
25883         // We need a join here 'cause find_object would complain if the gen0
25884         // bricks of another heap haven't been fixed up. So we need to make sure
25885         // that every heap's gen0 bricks are fixed up before we proceed.
25886 #ifdef MULTIPLE_HEAPS
25887         bgc_t_join.join(this, gc_join_after_absorb);
25888         if (bgc_t_join.joined())
25889         {
25890             dprintf(3, ("Joining BGC threads after absorb"));
25891             bgc_t_join.restart();
25892         }
25893 #endif //MULTIPLE_HEAPS
25894
25895         // give VM a chance to do work
25896         GCToEEInterface::GcBeforeBGCSweepWork();
25897
25898         //reset the flag, indicating that the EE no longer expects concurrent
25899         //marking
25900         sc.concurrent = FALSE;
25901
25902         total_loh_size = generation_size (max_generation + 1);
25903         total_soh_size = generation_sizes (generation_of (max_generation));
25904
25905         dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
25906
25907         dprintf (2, ("nonconcurrent marking stack roots"));
25908         GCScan::GcScanRoots(background_promote,
25909                                 max_generation, max_generation,
25910                                 &sc);
25911         //concurrent_print_time_delta ("nonconcurrent marking stack roots");
25912         concurrent_print_time_delta ("NRS");
25913
25914 //        finalize_queue->EnterFinalizeLock();
25915         finalize_queue->GcScanRoots(background_promote, heap_number, 0);
25916 //        finalize_queue->LeaveFinalizeLock();
25917
25918         dprintf (2, ("nonconcurrent marking handle table"));
25919         GCScan::GcScanHandles(background_promote,
25920                                   max_generation, max_generation,
25921                                   &sc);
25922         //concurrent_print_time_delta ("nonconcurrent marking handle table");
25923         concurrent_print_time_delta ("NRH");
25924
25925         dprintf (2,("---- (GC%d)final going through written pages ----", VolatileLoad(&settings.gc_index)));
25926         revisit_written_pages (FALSE);
25927         //concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH");
25928         concurrent_print_time_delta ("NRre LOH");
25929
25930 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25931 #ifdef MULTIPLE_HEAPS
25932         bgc_t_join.join(this, gc_join_disable_software_write_watch);
25933         if (bgc_t_join.joined())
25934 #endif // MULTIPLE_HEAPS
25935         {
25936             // The runtime is suspended, and we will be doing a final query of dirty pages, so pause tracking written pages to
25937             // avoid further perf penalty after the runtime is restarted
25938             SoftwareWriteWatch::DisableForGCHeap();
25939
25940 #ifdef MULTIPLE_HEAPS
25941             dprintf(3, ("Restarting BGC threads after disabling software write watch"));
25942             bgc_t_join.restart();
25943 #endif // MULTIPLE_HEAPS
25944         }
25945 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
25946
25947         dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count));
25948         bgc_overflow_count = 0;
25949
25950         // Dependent handles need to be scanned with a special algorithm (see the header comment on
25951         // scan_dependent_handles for more detail). We perform an initial scan without processing any mark
25952         // stack overflow. This is not guaranteed to complete the operation but in a common case (where there
25953         // are no dependent handles that are due to be collected) it allows us to optimize away further scans.
25954         // The call to background_scan_dependent_handles is what will cycle through more iterations if
25955         // required and will also perform processing of any mark stack overflow once the dependent handle
25956         // table has been fully promoted.
25957         dprintf (2, ("1st dependent handle scan and process mark overflow"));
25958         GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc);
25959         background_scan_dependent_handles (&sc);
25960         //concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow");
25961         concurrent_print_time_delta ("NR 1st Hov");
25962
25963         dprintf (2, ("after NR 1st Hov count: %d", bgc_overflow_count));
25964         bgc_overflow_count = 0;
25965
25966 #ifdef MULTIPLE_HEAPS
25967         bgc_t_join.join(this, gc_join_null_dead_short_weak);
25968         if (bgc_t_join.joined())
25969 #endif //MULTIPLE_HEAPS
25970         {
25971             GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc);
25972
25973 #ifdef MULTIPLE_HEAPS
25974             dprintf(3, ("Joining BGC threads for short weak handle scan"));
25975             bgc_t_join.restart();
25976 #endif //MULTIPLE_HEAPS
25977         }
25978
25979         // null out the target of short weakref that were not promoted.
25980         GCScan::GcShortWeakPtrScan(background_promote, max_generation, max_generation,&sc);
25981
25982         //concurrent_print_time_delta ("bgc GcShortWeakPtrScan");
25983         concurrent_print_time_delta ("NR GcShortWeakPtrScan");
25984     }
25985
25986     {
25987 #ifdef MULTIPLE_HEAPS
25988         bgc_t_join.join(this, gc_join_scan_finalization);
25989         if (bgc_t_join.joined())
25990         {
25991             dprintf(3, ("Joining BGC threads for finalization"));
25992             bgc_t_join.restart();
25993         }
25994 #endif //MULTIPLE_HEAPS
25995
25996         //Handle finalization.
25997         dprintf(3,("Marking finalization data"));
25998         //concurrent_print_time_delta ("bgc joined to mark finalization");
25999         concurrent_print_time_delta ("NRj");
26000
26001 //        finalize_queue->EnterFinalizeLock();
26002         finalize_queue->ScanForFinalization (background_promote, max_generation, FALSE, __this);
26003 //        finalize_queue->LeaveFinalizeLock();
26004
26005         concurrent_print_time_delta ("NRF");
26006     }
26007
26008     dprintf (2, ("before NR 2nd Hov count: %d", bgc_overflow_count));
26009     bgc_overflow_count = 0;
26010
26011     // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
26012     // for finalization. As before background_scan_dependent_handles will also process any mark stack
26013     // overflow.
26014     dprintf (2, ("2nd dependent handle scan and process mark overflow"));
26015     background_scan_dependent_handles (&sc);
26016     //concurrent_print_time_delta ("2nd nonconcurrent dependent handle scan and process mark overflow");
26017     concurrent_print_time_delta ("NR 2nd Hov");
26018
26019 #ifdef MULTIPLE_HEAPS
26020     bgc_t_join.join(this, gc_join_null_dead_long_weak);
26021     if (bgc_t_join.joined())
26022     {
26023         dprintf(2, ("Joining BGC threads for weak pointer deletion"));
26024         bgc_t_join.restart();
26025     }
26026 #endif //MULTIPLE_HEAPS
26027
26028     // null out the targets of long weakrefs that were not promoted.
26029     GCScan::GcWeakPtrScan (background_promote, max_generation, max_generation, &sc);
26030     concurrent_print_time_delta ("NR GcWeakPtrScan");
26031
26032 #ifdef MULTIPLE_HEAPS
26033     bgc_t_join.join(this, gc_join_null_dead_syncblk);
26034     if (bgc_t_join.joined())
26035 #endif //MULTIPLE_HEAPS
26036     {
26037         dprintf (2, ("calling GcWeakPtrScanBySingleThread"));
26038         // scan for deleted entries in the syncblk cache
26039         GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc);
26040         concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread");
26041 #ifdef MULTIPLE_HEAPS
26042         dprintf(2, ("Starting BGC threads for end of background mark phase"));
26043         bgc_t_join.restart();
26044 #endif //MULTIPLE_HEAPS
26045     }
26046
26047     gen0_bricks_cleared = FALSE;
26048
26049     dprintf (2, ("end of bgc mark: loh: %Id, soh: %Id", 
26050                  generation_size (max_generation + 1), 
26051                  generation_sizes (generation_of (max_generation))));
26052
26053     for (int gen_idx = max_generation; gen_idx <= (max_generation + 1); gen_idx++)
26054     {
26055         generation* gen = generation_of (gen_idx);
26056         dynamic_data* dd = dynamic_data_of (gen_idx);
26057         dd_begin_data_size (dd) = generation_size (gen_idx) - 
26058                                    (generation_free_list_space (gen) + generation_free_obj_space (gen)) -
26059                                    Align (size (generation_allocation_start (gen)));
26060         dd_survived_size (dd) = 0;
26061         dd_pinned_survived_size (dd) = 0;
26062         dd_artificial_pinned_survived_size (dd) = 0;
26063         dd_added_pinned_size (dd) = 0;
26064     }
26065
26066     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
26067     PREFIX_ASSUME(seg != NULL);
26068
26069     while (seg)
26070     {
26071         seg->flags &= ~heap_segment_flags_swept;
26072
26073         if (heap_segment_allocated (seg) == heap_segment_mem (seg))
26074         {
26075             // This can't happen...
26076             FATAL_GC_ERROR();
26077         }
26078
26079         if (seg == ephemeral_heap_segment)
26080         {
26081             heap_segment_background_allocated (seg) = generation_allocation_start (generation_of (max_generation - 1));
26082         }
26083         else
26084         {
26085             heap_segment_background_allocated (seg) = heap_segment_allocated (seg);
26086         }
26087
26088         dprintf (2, ("seg %Ix background allocated is %Ix", 
26089                       heap_segment_mem (seg), 
26090                       heap_segment_background_allocated (seg)));
26091         seg = heap_segment_next_rw (seg);
26092     }
26093
26094     // We need to void the alloc contexts here 'cause while background_ephemeral_sweep is running
26095     // we can't let user code consume the leftover parts of these alloc contexts.
26096     repair_allocation_contexts (FALSE);
26097
26098 #ifdef TIME_GC
26099     finish = GetCycleCount32();
26100     mark_time = finish - start;
26101 #endif //TIME_GC
26102
26103     dprintf (2, ("end of bgc mark: gen2 free list space: %Id, free obj space: %Id", 
26104         generation_free_list_space (generation_of (max_generation)), 
26105         generation_free_obj_space (generation_of (max_generation))));
26106
26107     dprintf(2,("---- (GC%d)End of background mark phase ----", VolatileLoad(&settings.gc_index)));
26108 }
26109
26110 void
26111 gc_heap::suspend_EE ()
26112 {
26113     dprintf (2, ("suspend_EE"));
26114 #ifdef MULTIPLE_HEAPS
26115     gc_heap* hp = gc_heap::g_heaps[0];
26116     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26117 #else
26118     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26119 #endif //MULTIPLE_HEAPS
26120 }
26121
26122 #ifdef MULTIPLE_HEAPS
26123 void
26124 gc_heap::bgc_suspend_EE ()
26125 {
26126     for (int i = 0; i < n_heaps; i++)
26127     {
26128         gc_heap::g_heaps[i]->reset_gc_done();
26129     }
26130     gc_started = TRUE;
26131     dprintf (2, ("bgc_suspend_EE"));
26132     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26133
26134     gc_started = FALSE;
26135     for (int i = 0; i < n_heaps; i++)
26136     {
26137         gc_heap::g_heaps[i]->set_gc_done();
26138     }
26139 }
26140 #else
26141 void
26142 gc_heap::bgc_suspend_EE ()
26143 {
26144     reset_gc_done();
26145     gc_started = TRUE;
26146     dprintf (2, ("bgc_suspend_EE"));
26147     GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26148     gc_started = FALSE;
26149     set_gc_done();
26150 }
26151 #endif //MULTIPLE_HEAPS
26152
26153 void
26154 gc_heap::restart_EE ()
26155 {
26156     dprintf (2, ("restart_EE"));
26157 #ifdef MULTIPLE_HEAPS
26158     GCToEEInterface::RestartEE(FALSE);
26159 #else
26160     GCToEEInterface::RestartEE(FALSE);
26161 #endif //MULTIPLE_HEAPS
26162 }
26163
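      // Returns the upper bound to scan for 'seg' during a revisit. For a concurrent revisit
      // the result is page aligned and the ephemeral segment is capped at the start of gen1
      // (gen0 is still being allocated into concurrently); otherwise heap_segment_allocated
      // is used directly.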
26164 inline uint8_t* gc_heap::high_page ( heap_segment* seg, BOOL concurrent_p)
26165 {
26166     if (concurrent_p)
26167     {
26168         uint8_t* end = ((seg == ephemeral_heap_segment) ?
26169                      generation_allocation_start (generation_of (max_generation-1)) :
26170                      heap_segment_allocated (seg));
26171         return align_lower_page (end);
26172     }
26173     else 
26174     {
26175         return heap_segment_allocated (seg);
26176     }
26177 }
26178
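      // Marks through the objects intersecting one dirtied page [page, page + WRITE_WATCH_UNIT_SIZE).
      // last_page and last_object carry state across calls so consecutive pages of the same segment
      // don't repeat the search for their first object; for the concurrent LOH case the bgc alloc
      // lock brackets each object so we don't race with an in-flight large object allocation.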
26179 void gc_heap::revisit_written_page (uint8_t* page,
26180                                     uint8_t* end,
26181                                     BOOL concurrent_p,
26182                                     heap_segment* seg,
26183                                     uint8_t*& last_page,
26184                                     uint8_t*& last_object,
26185                                     BOOL large_objects_p,
26186                                     size_t& num_marked_objects)
26187 {
26188     UNREFERENCED_PARAMETER(seg);
26189
26190     uint8_t*   start_address = page;
26191     uint8_t*   o             = 0;
26192     int align_const = get_alignment_constant (!large_objects_p);
26193     uint8_t* high_address = end;
26194     uint8_t* current_lowest_address = background_saved_lowest_address;
26195     uint8_t* current_highest_address = background_saved_highest_address;
26196     BOOL no_more_loop_p = FALSE;
26197
26198     THREAD_FROM_HEAP;
26199 #ifndef MULTIPLE_HEAPS
26200     const int thread = heap_number;
26201 #endif //!MULTIPLE_HEAPS
26202
26203     if (large_objects_p)
26204     {
26205         o = last_object;
26206     }
26207     else
26208     {
26209         if (((last_page + WRITE_WATCH_UNIT_SIZE) == page)
26210             || (start_address <= last_object))
26211         {
26212             o = last_object;
26213         }
26214         else
26215         {
26216             o = find_first_object (start_address, last_object);
26217             // We can visit the same object again, but on a different page.
26218             assert (o >= last_object);
26219         }
26220     }
26221
26222     dprintf (3,("page %Ix start: %Ix, %Ix[ ",
26223                (size_t)page, (size_t)o,
26224                (size_t)(min (high_address, page + WRITE_WATCH_UNIT_SIZE))));
26225
26226     while (o < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
26227     {
26228         size_t s;
26229
26230         if (concurrent_p && large_objects_p)
26231         {
26232             bgc_alloc_lock->bgc_mark_set (o);
26233
26234             if (((CObjectHeader*)o)->IsFree())
26235             {
26236                 s = unused_array_size (o);
26237             }
26238             else
26239             {
26240                 s = size (o);
26241             }
26242         }
26243         else
26244         {
26245             s = size (o);
26246         }
26247
26248         dprintf (3,("Considering object %Ix(%s)", (size_t)o, (background_object_marked (o, FALSE) ? "bm" : "nbm")));
26249
26250         assert (Align (s) >= Align (min_obj_size));
26251
26252         uint8_t* next_o =  o + Align (s, align_const);
26253
26254         if (next_o >= start_address) 
26255         {
26256 #ifdef MULTIPLE_HEAPS
26257             if (concurrent_p)
26258             {
26259                 // We set last_object here for SVR BGC because SVR BGC has more than
26260                 // one GC thread. With more than one GC thread we would run into this
26261                 // situation if we skipped unmarked objects:
26262                 // bgc thread 1 calls GWW, detects object X as not marked and would skip it
26263                 // for revisit.
26264                 // bgc thread 2 marks X and all its current children.
26265                 // a user thread comes along and dirties more (and later) pages in X.
26266                 // bgc thread 1 calls GWW again and gets those later pages but it will not mark anything
26267                 // on them because it had already skipped X. We need to detect that this object is now
26268                 // marked and mark the children on the dirtied pages.
26269                 // In the future, if we have fewer BGC threads than heaps, we should make this
26270                 // check depend on the number of BGC threads.
26271                 last_object = o;
26272             }
26273 #endif //MULTIPLE_HEAPS
26274
26275             if (contain_pointers (o) &&
26276                 (!((o >= current_lowest_address) && (o < current_highest_address)) ||
26277                 background_marked (o)))
26278             {
26279                 dprintf (3, ("going through %Ix", (size_t)o));
26280                 go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s),
26281                                     if ((uint8_t*)poo >= min (high_address, page + WRITE_WATCH_UNIT_SIZE))
26282                                     {
26283                                         no_more_loop_p = TRUE;
26284                                         goto end_limit;
26285                                     }
26286                                     uint8_t* oo = *poo;
26287
26288                                     num_marked_objects++;
26289                                     background_mark_object (oo THREAD_NUMBER_ARG);
26290                                 );
26291             }
26292             else if (
26293                 concurrent_p &&
26294 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // see comment below
26295                 large_objects_p &&
26296 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26297                 ((CObjectHeader*)o)->IsFree() &&
26298                 (next_o > min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
26299             {
26300                 // We need to not skip the object here because of this corner scenario:
26301                 // A large object was being allocated during BGC mark so we first made it 
26302                 // into a free object, then cleared its memory. In this loop we would detect
26303                 // that it's a free object which normally we would skip. But by the next time
26304                 // we call GetWriteWatch we could still be on this object and the object had
26305                 // been made into a valid object and some of its memory was changed. We need
26306                 // to be sure to process those written pages so we can't skip the object just
26307                 // yet.
26308                 //
26309                 // Similarly, when using software write watch, don't advance last_object when
26310                 // the current object is a free object that spans beyond the current page or
26311                 // high_address. Software write watch acquires gc_lock before the concurrent
26312                 // GetWriteWatch() call during revisit_written_pages(). A foreground GC may
26313                 // happen at that point and allocate from this free region, so when
26314                 // revisit_written_pages() continues, it cannot skip now-valid objects in this
26315                 // region.
26316                 no_more_loop_p = TRUE;
26317                 goto end_limit;                
26318             }
26319         }
26320 end_limit:
26321         if (concurrent_p && large_objects_p)
26322         {
26323             bgc_alloc_lock->bgc_mark_done ();
26324         }
26325         if (no_more_loop_p)
26326         {
26327             break;
26328         }
26329         o = next_o;
26330     }
26331
26332 #ifdef MULTIPLE_HEAPS
26333     if (concurrent_p)
26334     {
26335         assert (last_object < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)));
26336     }
26337     else
26338 #endif //MULTIPLE_HEAPS
26339     {
26340         last_object = o;
26341     }
26342
26343     dprintf (3,("Last object: %Ix", (size_t)last_object));
26344     last_page = align_write_watch_lower_page (o);
26345 }
26346
26347 // When reset_only_p is TRUE, we should only reset pages that are in range
26348 // because we need to consider the segments or part of segments that were
26349 // allocated out of range all live.
26350 void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
26351 {
26352 #ifdef WRITE_WATCH
26353     if (concurrent_p && !reset_only_p)
26354     {
26355         current_bgc_state = bgc_revisit_soh;
26356     }
26357
26358     size_t total_dirtied_pages = 0;
26359     size_t total_marked_objects = 0;
26360
26361     heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
26362
26363     PREFIX_ASSUME(seg != NULL);
26364
26365     bool reset_watch_state = !!concurrent_p;
26366     bool is_runtime_suspended = !concurrent_p;
26367     BOOL small_object_segments = TRUE;
26368     int align_const = get_alignment_constant (small_object_segments);
26369
26370     while (1)
26371     {
26372         if (seg == 0)
26373         {
26374             if (small_object_segments)
26375             {
26376                 //switch to large segment
26377                 if (concurrent_p && !reset_only_p)
26378                 {
26379                     current_bgc_state = bgc_revisit_loh;
26380                 }
26381
26382                 if (!reset_only_p)
26383                 {
26384                     dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
26385                     fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
26386                     concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH");
26387                     total_dirtied_pages = 0;
26388                     total_marked_objects = 0;
26389                 }
26390
26391                 small_object_segments = FALSE;
26392                 //concurrent_print_time_delta (concurrent_p ? "concurrent marking dirtied pages on SOH" : "nonconcurrent marking dirtied pages on SOH");
26393
26394                 dprintf (3, ("now revisiting large object segments"));
26395                 align_const = get_alignment_constant (small_object_segments);
26396                 seg = heap_segment_rw (generation_start_segment (large_object_generation));
26397
26398                 PREFIX_ASSUME(seg != NULL);
26399
26400                 continue;
26401             }
26402             else
26403             {
26404                 if (reset_only_p)
26405                 {
26406                     dprintf (GTC_LOG, ("h%d: tdp: %Id", heap_number, total_dirtied_pages));
26407                 } 
26408                 else
26409                 {
26410                     dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
26411                     fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
26412                 }
26413                 break;
26414             }
26415         }
26416         uint8_t* base_address = (uint8_t*)heap_segment_mem (seg);
26417         //we need to truncate to the base of the page because
26418         //some newly allocated objects could exist beyond heap_segment_allocated
26419         //and if we reset the write watch status of the last page,
26420         //they wouldn't be guaranteed to be visited -> gc hole.
26421         uintptr_t bcount = array_size;
26422         uint8_t* last_page = 0;
26423         uint8_t* last_object = heap_segment_mem (seg);
26424         uint8_t* high_address = 0;
26425
26426         BOOL skip_seg_p = FALSE;
26427
26428         if (reset_only_p)
26429         {
26430             if ((heap_segment_mem (seg) >= background_saved_lowest_address) ||
26431                 (heap_segment_reserved (seg) <= background_saved_highest_address))
26432             {
26433                 dprintf (3, ("h%d: sseg: %Ix(-%Ix)", heap_number, 
26434                     heap_segment_mem (seg), heap_segment_reserved (seg)));
26435                 skip_seg_p = TRUE;
26436             }
26437         }
26438
26439         if (!skip_seg_p)
26440         {
26441             dprintf (3, ("looking at seg %Ix", (size_t)last_object));
26442
26443             if (reset_only_p)
26444             {
26445                 base_address = max (base_address, background_saved_lowest_address);
26446                 dprintf (3, ("h%d: reset only starting %Ix", heap_number, base_address));
26447             }
26448
26449             dprintf (3, ("h%d: starting: %Ix, seg %Ix-%Ix", heap_number, base_address, 
26450                 heap_segment_mem (seg), heap_segment_reserved (seg)));
26451
26452
26453             while (1)
26454             {
26455                 if (reset_only_p)
26456                 {
26457                     high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
26458                     high_address = min (high_address, background_saved_highest_address);
26459                 }
26460                 else
26461                 {
26462                     high_address = high_page (seg, concurrent_p);
26463                 }
26464
26465                 if ((base_address < high_address) &&
26466                     (bcount >= array_size))
26467                 {
26468                     ptrdiff_t region_size = high_address - base_address;
26469                     dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size));
26470
26471 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26472                     // When the runtime is not suspended, it's possible for the table to be resized concurrently with the scan
26473                     // for dirty pages below. Prevent that by synchronizing with grow_brick_card_tables(). When the runtime is
26474                     // suspended, it's ok to scan for dirty pages concurrently from multiple background GC threads for disjoint
26475                     // memory regions.
26476                     if (!is_runtime_suspended)
26477                     {
26478                         enter_spin_lock(&gc_lock);
26479                     }
26480 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26481
26482                     get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size,
26483                                                  (void**)background_written_addresses,
26484                                                  &bcount, is_runtime_suspended);
26485
26486 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26487                     if (!is_runtime_suspended)
26488                     {
26489                         leave_spin_lock(&gc_lock);
26490                     }
26491 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26492
26493                     if (bcount != 0)
26494                     {
26495                         total_dirtied_pages += bcount;
26496
26497                         dprintf (3, ("Found %Id pages [%Ix, %Ix[", 
26498                                         bcount, (size_t)base_address, (size_t)high_address));
26499                     }
26500
26501                     if (!reset_only_p)
26502                     {
26503                         for (unsigned i = 0; i < bcount; i++)
26504                         {
26505                             uint8_t* page = (uint8_t*)background_written_addresses[i];
26506                             dprintf (3, ("looking at page %d at %Ix(h: %Ix)", i, 
26507                                 (size_t)page, (size_t)high_address));
26508                             if (page < high_address)
26509                             {
26510                                 //search for marked objects in the page
26511                                 revisit_written_page (page, high_address, concurrent_p,
26512                                                     seg, last_page, last_object,
26513                                                     !small_object_segments,
26514                                                     total_marked_objects);
26515                             }
26516                             else
26517                             {
26518                                 dprintf (3, ("page %d at %Ix is >= %Ix!", i, (size_t)page, (size_t)high_address));
26519                                 assert (!"page shouldn't have exceeded limit");
26520                             }
26521                         }
26522                     }
26523
26524                     if (bcount >= array_size){
26525                         base_address = background_written_addresses [array_size-1] + WRITE_WATCH_UNIT_SIZE;
26526                         bcount = array_size;
26527                     }
26528                 }
26529                 else
26530                 {
26531                     break;
26532                 }
26533             }
26534         }
26535
26536         seg = heap_segment_next_rw (seg);
26537     }
26538
26539 #endif //WRITE_WATCH
26540 }
26541
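      // Doubles the c_mark_list buffer. If the doubled size would overflow or the allocation
      // fails, the existing list is drained by marking its elements instead.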
26542 void gc_heap::background_grow_c_mark_list()
26543 {
26544     assert (c_mark_list_index >= c_mark_list_length);
26545     BOOL should_drain_p = FALSE;
26546     THREAD_FROM_HEAP;
26547 #ifndef MULTIPLE_HEAPS
26548     const int thread = heap_number;
26549 #endif //!MULTIPLE_HEAPS
26550
26551     dprintf (2, ("stack copy buffer overflow"));
26552     uint8_t** new_c_mark_list = 0;
26553     {
26554         FAULT_NOT_FATAL();
26555         if (c_mark_list_length >= (SIZE_T_MAX / (2 * sizeof (uint8_t*))))
26556         {
26557             should_drain_p = TRUE;
26558         }
26559         else
26560         {
26561             new_c_mark_list = new (nothrow) uint8_t*[c_mark_list_length*2];
26562             if (new_c_mark_list == 0)
26563             {
26564                 should_drain_p = TRUE;
26565             }
26566         }
26567     }
26568     if (should_drain_p)
26570     {
26571         dprintf (2, ("No more memory for the stacks copy, draining.."));
26572         //drain the list by marking its elements
26573         background_drain_mark_list (thread);
26574     }
26575     else
26576     {
26577         assert (new_c_mark_list);
26578         memcpy (new_c_mark_list, c_mark_list, c_mark_list_length*sizeof(uint8_t*));
26579         c_mark_list_length = c_mark_list_length*2;
26580         delete [] c_mark_list;
26581         c_mark_list = new_c_mark_list;
26582     }
26583 }
26584
26585 void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc,
26586                                   uint32_t flags)
26587 {
26588     UNREFERENCED_PARAMETER(sc);
26589     //in order to save space on the array, mark the object,
26590     //knowing that it will be visited later
26591     assert (settings.concurrent);
26592
26593     THREAD_NUMBER_FROM_CONTEXT;
26594 #ifndef MULTIPLE_HEAPS
26595     const int thread = 0;
26596 #endif //!MULTIPLE_HEAPS
26597
26598     uint8_t* o = (uint8_t*)*ppObject;
26599
26600     if (o == 0)
26601         return;
26602
26603     HEAP_FROM_THREAD;
26604
26605     gc_heap* hp = gc_heap::heap_of (o);
26606
26607     if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
26608     {
26609         return;
26610     }
26611
26612 #ifdef INTERIOR_POINTERS
26613     if (flags & GC_CALL_INTERIOR)
26614     {
26615         o = hp->find_object (o, hp->background_saved_lowest_address);
26616         if (o == 0)
26617             return;
26618     }
26619 #endif //INTERIOR_POINTERS
26620
26621 #ifdef FEATURE_CONSERVATIVE_GC
26622     // For conservative GC, a value on stack may point to middle of a free object.
26623     // In this case, we don't need to promote the pointer.
26624     if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
26625     {
26626         return;
26627     }
26628 #endif //FEATURE_CONSERVATIVE_GC
26629
26630 #ifdef _DEBUG
26631     ((CObjectHeader*)o)->Validate();
26632 #endif //_DEBUG
26633
26634     dprintf (3, ("Concurrent Background Promote %Ix", (size_t)o));
26635     if (o && (size (o) > LARGE_OBJECT_SIZE))
26636     {
26637         dprintf (3, ("Brc %Ix", (size_t)o));
26638     }
26639
26640     if (hpt->c_mark_list_index >= hpt->c_mark_list_length)
26641     {
26642         hpt->background_grow_c_mark_list();
26643     }
26644     dprintf (3, ("pushing %Ix into mark_list", (size_t)o));
26645     hpt->c_mark_list [hpt->c_mark_list_index++] = o;
26646
26647     STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, "    GCHeap::Background Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
26648 }
26649
26650 void gc_heap::mark_absorb_new_alloc()
26651 {
26652     fix_allocation_contexts (FALSE);
26653     
26654     gen0_bricks_cleared = FALSE;
26655
26656     clear_gen0_bricks();
26657 }
26658
26659 BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
26660 {
26661     BOOL success = FALSE;
26662     BOOL thread_created = FALSE;
26663     dprintf (2, ("Preparing gc thread"));
26664     gh->bgc_threads_timeout_cs.Enter();
26665     if (!(gh->bgc_thread_running))
26666     {
26667         dprintf (2, ("GC thread not running"));
26668         if ((gh->bgc_thread == 0) && create_bgc_thread(gh))
26669         {
26670             success = TRUE;
26671             thread_created = TRUE;
26672         }
26673     }
26674     else
26675     {
26676         dprintf (3, ("GC thread already running"));
26677         success = TRUE;
26678     }
26679     gh->bgc_threads_timeout_cs.Leave();
26680
26681     if(thread_created)
26682         FIRE_EVENT(GCCreateConcurrentThread_V1);
26683
26684     return success;
26685 }
26686
26687 BOOL gc_heap::create_bgc_thread(gc_heap* gh)
26688 {
26689     assert (background_gc_done_event.IsValid());
26690
26691     //dprintf (2, ("Creating BGC thread"));
26692
26693     gh->bgc_thread_running = GCToEEInterface::CreateThread(gh->bgc_thread_stub, gh, true, ".NET Background GC");
26694     return gh->bgc_thread_running;
26695 }
26696
26697 BOOL gc_heap::create_bgc_threads_support (int number_of_heaps)
26698 {
26699     BOOL ret = FALSE;
26700     dprintf (3, ("Creating concurrent GC thread for the first time"));
26701     if (!background_gc_done_event.CreateManualEventNoThrow(TRUE))
26702     {
26703         goto cleanup;
26704     }
26705     if (!bgc_threads_sync_event.CreateManualEventNoThrow(FALSE))
26706     {
26707         goto cleanup;
26708     }
26709     if (!ee_proceed_event.CreateAutoEventNoThrow(FALSE))
26710     {
26711         goto cleanup;
26712     }
26713     if (!bgc_start_event.CreateManualEventNoThrow(FALSE))
26714     {
26715         goto cleanup;
26716     }
26717
26718 #ifdef MULTIPLE_HEAPS
26719     bgc_t_join.init (number_of_heaps, join_flavor_bgc);
26720 #else
26721     UNREFERENCED_PARAMETER(number_of_heaps);
26722 #endif //MULTIPLE_HEAPS
26723
26724     ret = TRUE;
26725
26726 cleanup:
26727
26728     if (!ret)
26729     {
26730         if (background_gc_done_event.IsValid())
26731         {
26732             background_gc_done_event.CloseEvent();
26733         }
26734         if (bgc_threads_sync_event.IsValid())
26735         {
26736             bgc_threads_sync_event.CloseEvent();
26737         }
26738         if (ee_proceed_event.IsValid())
26739         {
26740             ee_proceed_event.CloseEvent();
26741         }
26742         if (bgc_start_event.IsValid())
26743         {
26744             bgc_start_event.CloseEvent();
26745         }
26746     }
26747
26748     return ret;
26749 }
26750
26751 BOOL gc_heap::create_bgc_thread_support()
26752 {
26753     BOOL ret = FALSE;
26754     uint8_t** parr;
26755     
26756     if (!gc_lh_block_event.CreateManualEventNoThrow(FALSE))
26757     {
26758         goto cleanup;
26759     }
26760
26761     //needs to have room for as many of the smallest objects as will fit on a page
26762     parr = new (nothrow) uint8_t*[1 + OS_PAGE_SIZE / MIN_OBJECT_SIZE];
26763     if (!parr)
26764     {
26765         goto cleanup;
26766     }
26767
26768     make_c_mark_list (parr);
26769
26770     ret = TRUE;
26771
26772 cleanup:
26773
26774     if (!ret)
26775     {
26776         if (gc_lh_block_event.IsValid())
26777         {
26778             gc_lh_block_event.CloseEvent();
26779         }
26780     }
26781
26782     return ret;
26783 }
26784
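      // Returns the highest ephemeral generation whose allocation budget is exhausted
      // (checked from gen0 up, on any heap), or gen1 directly if this GC was triggered by
      // running out of SOH budget; returns -1 if no ephemeral GC is needed.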
26785 int gc_heap::check_for_ephemeral_alloc()
26786 {
26787     int gen = ((settings.reason == reason_oos_soh) ? (max_generation - 1) : -1);
26788
26789     if (gen == -1)
26790     {
26791 #ifdef MULTIPLE_HEAPS
26792         for (int heap_index = 0; heap_index < n_heaps; heap_index++)
26793 #endif //MULTIPLE_HEAPS
26794         {
26795             for (int i = 0; i <= (max_generation - 1); i++)
26796             {
26797 #ifdef MULTIPLE_HEAPS
26798                 if (g_heaps[heap_index]->get_new_allocation (i) <= 0)
26799 #else
26800                 if (get_new_allocation (i) <= 0)
26801 #endif //MULTIPLE_HEAPS
26802                 {
26803                     gen = max (gen, i);
26804                 }
26805                 else
26806                     break;
26807             }
26808         }
26809     }
26810
26811     return gen;
26812 }
26813
26814 // Wait for gc to finish sequential part
26815 void gc_heap::wait_to_proceed()
26816 {
26817     assert (background_gc_done_event.IsValid());
26818     assert (bgc_start_event.IsValid());
26819
26820     user_thread_wait(&ee_proceed_event, FALSE);
26821 }
26822
26823 // Start a new concurrent gc
26824 void gc_heap::start_c_gc()
26825 {
26826     assert (background_gc_done_event.IsValid());
26827     assert (bgc_start_event.IsValid());
26828
26829 //Need to make sure that the gc thread is in the right place.
26830     background_gc_done_event.Wait(INFINITE, FALSE);
26831     background_gc_done_event.Reset();
26832     bgc_start_event.Set();
26833 }
26834
26835 void gc_heap::do_background_gc()
26836 {
26837     dprintf (2, ("starting a BGC"));
26838 #ifdef MULTIPLE_HEAPS
26839     for (int i = 0; i < n_heaps; i++)
26840     {
26841         g_heaps[i]->init_background_gc();
26842     }
26843 #else
26844     init_background_gc();
26845 #endif //MULTIPLE_HEAPS
26846     //start the background gc
26847     start_c_gc ();
26848
26849     //wait until we get restarted by the BGC.
26850     wait_to_proceed();
26851 }
26852
26853 void gc_heap::kill_gc_thread()
26854 {
26855     //assert (settings.concurrent == FALSE);
26856
26857     // We are doing a two-stage shutdown now.
26858     // In the first stage, we do minimum work, and call ExitProcess at the end.
26859     // In the second stage, we have the Loader lock and only one thread is
26860     // alive.  Hence we do not need to kill the gc thread.
26861     background_gc_done_event.CloseEvent();
26862     gc_lh_block_event.CloseEvent();
26863     bgc_start_event.CloseEvent();
26864     bgc_threads_timeout_cs.Destroy();
26865     bgc_thread = 0;
26866     recursive_gc_sync::shutdown();
26867 }
26868
26869 void gc_heap::bgc_thread_function()
26870 {
26871     assert (background_gc_done_event.IsValid());
26872     assert (bgc_start_event.IsValid());
26873
26874     dprintf (3, ("gc_thread thread starting..."));
26875
26876     BOOL do_exit = FALSE;
26877
26878     bool cooperative_mode = true;
26879     bgc_thread_id.SetToCurrentThread();
26880     dprintf (1, ("bgc_thread_id is set to %x", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()));
26881     while (1)
26882     {
26883         // Wait for work to do...
26884         dprintf (3, ("bgc thread: waiting..."));
26885
26886         cooperative_mode = enable_preemptive ();
26887         //current_thread->m_fPreemptiveGCDisabled = 0;
26888
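              // Workstation builds (no MULTIPLE_HEAPS) wait with a finite timeout (2s debug,
              // 20s release) so the single BGC thread can exit when it's no longer needed;
              // server BGC threads wait indefinitely.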
26889         uint32_t result = bgc_start_event.Wait(
26890 #ifdef _DEBUG
26891 #ifdef MULTIPLE_HEAPS
26892                                              INFINITE,
26893 #else
26894                                              2000,
26895 #endif //MULTIPLE_HEAPS
26896 #else //_DEBUG
26897 #ifdef MULTIPLE_HEAPS
26898                                              INFINITE,
26899 #else
26900                                              20000,
26901 #endif //MULTIPLE_HEAPS
26902 #endif //_DEBUG
26903             FALSE);
26904         dprintf (2, ("gc thread: finished waiting"));
26905
26906         // not calling disable_preemptive here 'cause we 
26907         // can't wait for GC complete here - RestartEE will be called 
26908         // when we've done the init work.
26909
26910         if (result == WAIT_TIMEOUT)
26911         {
26912             // Should join the bgc threads and terminate all of them
26913             // at once.
26914             dprintf (1, ("GC thread timeout"));
26915             bgc_threads_timeout_cs.Enter();
26916             if (!keep_bgc_threads_p)
26917             {
26918                 dprintf (2, ("GC thread exiting"));
26919                 bgc_thread_running = FALSE;
26920                 bgc_thread = 0;
26921                 bgc_thread_id.Clear();
26922                 do_exit = TRUE;
26923             }
26924             bgc_threads_timeout_cs.Leave();
26925             if (do_exit)
26926                 break;
26927             else
26928             {
26929                 dprintf (3, ("GC thread needed, not exiting"));
26930                 continue;
26931             }
26932         }
26933         // if we signal the thread with no concurrent work to do -> exit
26934         if (!settings.concurrent)
26935         {
26936             dprintf (3, ("no concurrent GC needed, exiting"));
26937             break;
26938         }
26939 #ifdef TRACE_GC
26940         //trace_gc = TRUE;
26941 #endif //TRACE_GC
26942         recursive_gc_sync::begin_background();
26943         dprintf (2, ("beginning of bgc: gen2 FL: %Id, FO: %Id, frag: %Id", 
26944             generation_free_list_space (generation_of (max_generation)),
26945             generation_free_obj_space (generation_of (max_generation)),
26946             dd_fragmentation (dynamic_data_of (max_generation))));
26947
26948         gc1();
26949
26950         current_bgc_state = bgc_not_in_process;
26951
26952 #ifdef TRACE_GC
26953         //trace_gc = FALSE;
26954 #endif //TRACE_GC
26955
26956         enable_preemptive ();
26957 #ifdef MULTIPLE_HEAPS
26958         bgc_t_join.join(this, gc_join_done);
26959         if (bgc_t_join.joined())
26960 #endif //MULTIPLE_HEAPS
26961         {
26962             enter_spin_lock (&gc_lock);
26963             dprintf (SPINLOCK_LOG, ("bgc Egc"));
26964             
26965             bgc_start_event.Reset();
26966             do_post_gc();
26967 #ifdef MULTIPLE_HEAPS
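                  // Rebalance gen2 and gen3 allocation budgets across the heaps: sum the
                  // per-heap desired allocations (saturating at MAX_PTR on overflow) and give
                  // every heap an equal, aligned share.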
26968             for (int gen = max_generation; gen <= (max_generation + 1); gen++)
26969             {
26970                 size_t desired_per_heap = 0;
26971                 size_t total_desired = 0;
26972                 gc_heap* hp = 0;
26973                 dynamic_data* dd;
26974                 for (int i = 0; i < n_heaps; i++)
26975                 {
26976                     hp = g_heaps[i];
26977                     dd = hp->dynamic_data_of (gen);
26978                     size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
26979                     if (temp_total_desired < total_desired)
26980                     {
26981                         // we overflowed.
26982                         total_desired = (size_t)MAX_PTR;
26983                         break;
26984                     }
26985                     total_desired = temp_total_desired;
26986                 }
26987
26988                 desired_per_heap = Align ((total_desired/n_heaps), get_alignment_constant (FALSE));
26989
26990                 for (int i = 0; i < n_heaps; i++)
26991                 {
26992                     hp = gc_heap::g_heaps[i];
26993                     dd = hp->dynamic_data_of (gen);
26994                     dd_desired_allocation (dd) = desired_per_heap;
26995                     dd_gc_new_allocation (dd) = desired_per_heap;
26996                     dd_new_allocation (dd) = desired_per_heap;
26997                 }
26998             }
26999 #endif //MULTIPLE_HEAPS
27000 #ifdef MULTIPLE_HEAPS
27001             fire_pevents();
27002 #endif //MULTIPLE_HEAPS
27003
27004             c_write (settings.concurrent, FALSE);
27005             recursive_gc_sync::end_background();
27006             keep_bgc_threads_p = FALSE;
27007             background_gc_done_event.Set();
27008
27009             dprintf (SPINLOCK_LOG, ("bgc Lgc"));
27010             leave_spin_lock (&gc_lock);
27011 #ifdef MULTIPLE_HEAPS
27012             dprintf(1, ("End of BGC - starting all BGC threads"));
27013             bgc_t_join.restart();
27014 #endif //MULTIPLE_HEAPS
27015         }
27016         // We can't disable preemptive mode here because a GC might already have
27017         // started and decided to do a BGC, and it is waiting for a BGC thread to restart
27018         // the VM. That GC will be waiting in wait_to_proceed while we are waiting for it
27019         // to restart the VM, so we would deadlock.
27020         //gc_heap::disable_preemptive (current_thread, TRUE);
27021     }
27022
27023     FIRE_EVENT(GCTerminateConcurrentThread_V1);
27024
27025     dprintf (3, ("bgc_thread thread exiting"));
27026     return;
27027 }
27028
27029 #endif //BACKGROUND_GC
27030
27031 //Clear the cards [start_card, end_card[
27032 void gc_heap::clear_cards (size_t start_card, size_t end_card)
27033 {
27034     if (start_card < end_card)
27035     {
27036         size_t start_word = card_word (start_card);
27037         size_t end_word = card_word (end_card);
27038         if (start_word < end_word)
27039         {
27040             // Figure out the bit positions of the cards within their words
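                  // lowbits (~0, n) keeps only the n low bits, so the AND below clears the cards
                  // at and above start_card in the first word; highbits (~0, n) keeps bits n and
                  // above, so the AND further down clears the cards below end_card in the last word.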
27041             unsigned bits = card_bit (start_card);
27042             card_table [start_word] &= lowbits (~0, bits);
27043             for (size_t i = start_word+1; i < end_word; i++)
27044                 card_table [i] = 0;
27045             bits = card_bit (end_card);
27046             // Don't write beyond end_card (and possibly uncommitted card table space).
27047             if (bits != 0)
27048             {
27049                 card_table [end_word] &= highbits (~0, bits);
27050             }
27051         }
27052         else
27053         {
27054             // If the start and end cards are in the same word, just clear the appropriate card
27055             // bits in that word.
27056             card_table [start_word] &= (lowbits (~0, card_bit (start_card)) |
27057                                         highbits (~0, card_bit (end_card)));
27058         }
27059 #ifdef VERYSLOWDEBUG
27060         size_t  card = start_card;
27061         while (card < end_card)
27062         {
27063             assert (! (card_set_p (card)));
27064             card++;
27065         }
27066 #endif //VERYSLOWDEBUG
27067         dprintf (3,("Cleared cards [%Ix:%Ix, %Ix:%Ix[",
27068                   start_card, (size_t)card_address (start_card),
27069                   end_card, (size_t)card_address (end_card)));
27070     }
27071 }
27072
27073 void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address)
27074 {
27075     size_t   start_card = card_of (align_on_card (start_address));
27076     size_t   end_card = card_of (align_lower_card (end_address));
27077     clear_cards (start_card, end_card);
27078 }
27079
27080 // copy [srccard, ...[ to [dst_card, end_card[
27081 // This will set the same bit twice. Can be optimized.
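      // For each destination card in [dst_card, end_card[ the corresponding source card bit is
      // copied; when nextp is set (src and dst are at different offsets within their cards) the
      // following source card's bit is OR'ed in as well so a set source card is never dropped.
      // Each completed destination word also sets its card bundle when non-zero (with manually
      // managed card bundles).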
27082 inline
27083 void gc_heap::copy_cards (size_t dst_card,
27084                           size_t src_card,
27085                           size_t end_card, 
27086                           BOOL nextp)
27087 {
27088     // If the range is empty, this function is a no-op - with the subtlety that
27089     // either of the accesses card_table[srcwrd] or card_table[dstwrd] could be
27090     // outside the committed region.  To avoid the access, leave early.
27091     if (!(dst_card < end_card))
27092         return;
27093
27094     unsigned int srcbit = card_bit (src_card);
27095     unsigned int dstbit = card_bit (dst_card);
27096     size_t srcwrd = card_word (src_card);
27097     size_t dstwrd = card_word (dst_card);
27098     unsigned int srctmp = card_table[srcwrd];
27099     unsigned int dsttmp = card_table[dstwrd];
27100
27101     for (size_t card = dst_card; card < end_card; card++)
27102     {
27103         if (srctmp & (1 << srcbit))
27104             dsttmp |= 1 << dstbit;
27105         else
27106             dsttmp &= ~(1 << dstbit);
27107
27108         if (!(++srcbit % 32))
27109         {
27110             srctmp = card_table[++srcwrd];
27111             srcbit = 0;
27112         }
27113
27114         if (nextp)
27115         {
27116             if (srctmp & (1 << srcbit))
27117                 dsttmp |= 1 << dstbit;
27118         }
27119
27120         if (!(++dstbit % 32))
27121         {
27122             card_table[dstwrd] = dsttmp;
27123
27124 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27125             if (dsttmp != 0)
27126             {
27127                 card_bundle_set(cardw_card_bundle(dstwrd));
27128             }
27129 #endif
27130
27131             dstwrd++;
27132             dsttmp = card_table[dstwrd];
27133             dstbit = 0;
27134         }
27135     }
27136
27137     card_table[dstwrd] = dsttmp;
27138
27139 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27140     if (dsttmp != 0)
27141     {
27142         card_bundle_set(cardw_card_bundle(dstwrd));
27143     }
27144 #endif
27145 }
27146
27147 void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
27148 {
27149     ptrdiff_t relocation_distance = src - dest;
27150     size_t start_dest_card = card_of (align_on_card (dest));
27151     size_t end_dest_card = card_of (dest + len - 1);
27152     size_t dest_card = start_dest_card;
27153     size_t src_card = card_of (card_address (dest_card)+relocation_distance);
27154     dprintf (3,("Copying cards [%Ix:%Ix->%Ix:%Ix, ",
27155                  src_card, (size_t)src, dest_card, (size_t)dest));
27156     dprintf (3,(" %Ix->%Ix:%Ix[",
27157               (size_t)src+len, end_dest_card, (size_t)dest+len));
27158
27159     dprintf (3, ("dest: %Ix, src: %Ix, len: %Ix, reloc: %Ix, align_on_card(dest) is %Ix",
27160         dest, src, len, relocation_distance, (align_on_card (dest))));
27161
27162     dprintf (3, ("start_dest_card: %Ix (address: %Ix), end_dest_card: %Ix(addr: %Ix), card_of (dest): %Ix",
27163         start_dest_card, card_address (start_dest_card), end_dest_card, card_address (end_dest_card), card_of (dest)));
27164
27165     //First card has two boundaries
27166     if (start_dest_card != card_of (dest))
27167     {
27168         if ((card_of (card_address (start_dest_card) + relocation_distance) <= card_of (src + len - 1))&&
27169             card_set_p (card_of (card_address (start_dest_card) + relocation_distance)))
27170         {
27171             dprintf (3, ("card_address (start_dest_card) + reloc is %Ix, card: %Ix(set), src+len-1: %Ix, card: %Ix",
27172                     (card_address (start_dest_card) + relocation_distance),
27173                     card_of (card_address (start_dest_card) + relocation_distance),
27174                     (src + len - 1),
27175                     card_of (src + len - 1)));
27176
27177             dprintf (3, ("setting card: %Ix", card_of (dest)));
27178             set_card (card_of (dest));
27179         }
27180     }
27181
27182     if (card_set_p (card_of (src)))
27183         set_card (card_of (dest));
27184
27185
27186     copy_cards (dest_card, src_card, end_dest_card,
27187                 ((dest - align_lower_card (dest)) != (src - align_lower_card (src))));
27188
27189     //Last card has two boundaries.
27190     if ((card_of (card_address (end_dest_card) + relocation_distance) >= card_of (src)) &&
27191         card_set_p (card_of (card_address (end_dest_card) + relocation_distance)))
27192     {
27193         dprintf (3, ("card_address (end_dest_card) + reloc is %Ix, card: %Ix(set), src: %Ix, card: %Ix",
27194                 (card_address (end_dest_card) + relocation_distance),
27195                 card_of (card_address (end_dest_card) + relocation_distance),
27196                 src,
27197                 card_of (src)));
27198
27199         dprintf (3, ("setting card: %Ix", end_dest_card));
27200         set_card (end_dest_card);
27201     }
27202
27203     if (card_set_p (card_of (src + len - 1)))
27204         set_card (end_dest_card);
27205
27206 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27207     card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card))));
27208 #endif
27209 }
27210
27211 #ifdef BACKGROUND_GC
27212 // this does not need the Interlocked version of mark_array_set_marked.
27213 void gc_heap::copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
27214 {
27215     dprintf (3, ("Copying mark_bits for addresses [%Ix->%Ix, %Ix->%Ix[",
27216                  (size_t)src, (size_t)dest,
27217                  (size_t)src+len, (size_t)dest+len));
27218
27219     uint8_t* src_o = src;
27220     uint8_t* dest_o;
27221     uint8_t* src_end = src + len;
27222     int align_const = get_alignment_constant (TRUE);
27223     ptrdiff_t reloc = dest - src;
27224
27225     while (src_o < src_end)
27226     {
27227         uint8_t*  next_o = src_o + Align (size (src_o), align_const);
27228
27229         if (background_object_marked (src_o, TRUE))
27230         {
27231             dest_o = src_o + reloc;
27232
27233             //if (background_object_marked (dest_o, FALSE))
27234             //{
27235             //    dprintf (3, ("*%Ix shouldn't have already been marked!", (size_t)(dest_o)));
27236             //    FATAL_GC_ERROR();
27237             //}
27238
27239             background_mark (dest_o, 
27240                              background_saved_lowest_address, 
27241                              background_saved_highest_address);
27242             dprintf (3, ("bc*%Ix*bc, b*%Ix*b", (size_t)src_o, (size_t)(dest_o)));
27243         }
27244
27245         src_o = next_o;
27246     }
27247 }
27248 #endif //BACKGROUND_GC
27249
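      // Makes the brick for 'o' point at 'o' and makes every brick after it, up to (but not
      // including) the brick of next_o, point back at o's brick.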
27250 void gc_heap::fix_brick_to_highest (uint8_t* o, uint8_t* next_o)
27251 {
27252     size_t new_current_brick = brick_of (o);
27253     set_brick (new_current_brick,
27254                (o - brick_address (new_current_brick)));
27255     size_t b = 1 + new_current_brick;
27256     size_t limit = brick_of (next_o);
27257     //dprintf(3,(" fixing brick %Ix to point to object %Ix, till %Ix(%Ix)",
27258     dprintf(3,("b:%Ix->%Ix-%Ix", 
27259                new_current_brick, (size_t)o, (size_t)next_o));
27260     while (b < limit)
27261     {
27262         set_brick (b,(new_current_brick - b));
27263         b++;
27264     }
27265 }
27266
27267 // start cannot be >= heap_segment_allocated for the segment.
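      // Returns the last object that starts at or before 'start', using the brick table
      // (seeded from 'first_object') as a shortcut, and fixes up the bricks it walks over
      // to point at the highest object seen in them.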
27268 uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object)
27269 {
27270     size_t brick = brick_of (start);
27271     uint8_t* o = 0;
27272     //last_object == null -> no search shortcut needed
27273     if ((brick == brick_of (first_object) || (start <= first_object)))
27274     {
27275         o = first_object;
27276     }
27277     else
27278     {
27279         ptrdiff_t  min_brick = (ptrdiff_t)brick_of (first_object);
27280         ptrdiff_t  prev_brick = (ptrdiff_t)brick - 1;
27281         int         brick_entry = 0;
27282         while (1)
27283         {
27284             if (prev_brick < min_brick)
27285             {
27286                 break;
27287             }
27288             if ((brick_entry = get_brick_entry(prev_brick)) >= 0)
27289             {
27290                 break;
27291             }
27292             assert (! ((brick_entry == 0)));
27293             prev_brick = (brick_entry + prev_brick);
27294
27295         }
27296         o = ((prev_brick < min_brick) ? first_object :
27297                       brick_address (prev_brick) + brick_entry - 1);
27298         assert (o <= start);
27299     }
27300
27301     assert (Align (size (o)) >= Align (min_obj_size));
27302     uint8_t*  next_o = o + Align (size (o));
27303     size_t curr_cl = (size_t)next_o / brick_size;
27304     size_t min_cl = (size_t)first_object / brick_size;
27305
27306     //dprintf (3,( "Looking for intersection with %Ix from %Ix", (size_t)start, (size_t)o));
27307 #ifdef TRACE_GC
27308     unsigned int n_o = 1;
27309 #endif //TRACE_GC
27310
27311     uint8_t* next_b = min (align_lower_brick (next_o) + brick_size, start+1);
27312
27313     while (next_o <= start)
27314     {
27315         do
27316         {
27317 #ifdef TRACE_GC
27318             n_o++;
27319 #endif //TRACE_GC
27320             o = next_o;
27321             assert (Align (size (o)) >= Align (min_obj_size));
27322             next_o = o + Align (size (o));
27323             Prefetch (next_o);
27324         }while (next_o < next_b);
27325
27326         if (((size_t)next_o / brick_size) != curr_cl)
27327         {
27328             if (curr_cl >= min_cl)
27329             {
27330                 fix_brick_to_highest (o, next_o);
27331             }
27332             curr_cl = (size_t) next_o / brick_size;
27333         }
27334         next_b = min (align_lower_brick (next_o) + brick_size, start+1);
27335     }
27336
27337     size_t bo = brick_of (o);
27338     //dprintf (3, ("Looked at %Id objects, fixing brick [%Ix-[%Ix", 
27339     dprintf (3, ("%Id o, [%Ix-[%Ix", 
27340         n_o, bo, brick));
27341     if (bo < brick)
27342     {
27343         set_brick (bo, (o - brick_address(bo)));
27344         size_t b = 1 + bo;
27345         int x = -1;
27346         while (b < brick)
27347         {
27348             set_brick (b,x--);
27349             b++;
27350         }
27351     }
27352
27353     return o;
27354 }
27355
27356 #ifdef CARD_BUNDLE
27357
27358 // Find the first non-zero card word between cardw and cardw_end.
27359 // The index of the word we find is returned in cardw.
27360 BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
27361 {
27362     dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix",
27363                  dd_collection_count (dynamic_data_of (0)), cardw, cardw_end));
27364
27365     if (card_bundles_enabled())
27366     {
27367         size_t cardb = cardw_card_bundle (cardw);
27368         size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end));
27369         while (1)
27370         {
27371             // Find a non-zero bundle
27372             while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0))
27373             {
27374                 cardb++;
27375             }
27376
27377             if (cardb == end_cardb)
27378                 return FALSE;
27379
27380             // We found a bundle, so go through its words and find a non-zero card word
27381             uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)];
27382             uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)];
27383             while ((card_word < card_word_end) && !(*card_word))
27384             {
27385                 card_word++;
27386             }
27387
27388             if (card_word != card_word_end)
27389             {
27390                 cardw = (card_word - &card_table[0]);
27391                 return TRUE;
27392             }
27393             else if ((cardw <= card_bundle_cardw (cardb)) &&
27394                      (card_word == &card_table [card_bundle_cardw (cardb+1)]))
27395             {
27396                 // a whole bundle was explored and is empty
27397                 dprintf  (3, ("gc: %d, find_card_dword clear bundle: %Ix cardw:[%Ix,%Ix[",
27398                         dd_collection_count (dynamic_data_of (0)), 
27399                         cardb, card_bundle_cardw (cardb),
27400                         card_bundle_cardw (cardb+1)));
27401                 card_bundle_clear (cardb);
27402             }
27403
27404             cardb++;
27405         }
27406     }
27407     else
27408     {
27409         uint32_t* card_word = &card_table[cardw];
27410         uint32_t* card_word_end = &card_table [cardw_end];
27411
27412         while (card_word < card_word_end)
27413         {
27414             if (*card_word != 0)
27415             {
27416                 cardw = (card_word - &card_table [0]);
27417                 return TRUE;
27418             }
27419
27420             card_word++;
27421         }
27422
27423         return FALSE;
27424     }
27425 }
27426
27427 #endif //CARD_BUNDLE
27428
27429 // Find cards that are set between two points in a card table.
27430 // Parameters
27431 //     card_table    : The card table.
27432 //     card          : [in/out] As input, the card to start searching from.
27433 //                              As output, the first card that's set.
27434 //     card_word_end : The card word at which to stop looking.
27435 //     end_card      : [out] The last card which is set.
27436 BOOL gc_heap::find_card(uint32_t* card_table,
27437                         size_t&   card,
27438                         size_t    card_word_end,
27439                         size_t&   end_card)
27440 {
27441     uint32_t* last_card_word;
27442     uint32_t card_word_value;
27443     uint32_t bit_position;
27444     
27445     // Find the first card which is set
27446     last_card_word = &card_table [card_word (card)];
27447     bit_position = card_bit (card);
27448     card_word_value = (*last_card_word) >> bit_position;
27449     if (!card_word_value)
27450     {
27451         bit_position = 0;
27452 #ifdef CARD_BUNDLE
27453         // Using the card bundle, go through the remaining card words between here and 
27454         // card_word_end until we find one that is non-zero.
27455         size_t lcw = card_word(card) + 1;
27456         if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE)
27457         {
27458             return FALSE;
27459         }
27460         else
27461         {
27462             last_card_word = &card_table [lcw];
27463             card_word_value = *last_card_word;
27464         }
27465
27466 #else //CARD_BUNDLE
27467         // Go through the remaining card words between here and card_word_end until we find
27468         // one that is non-zero.
27469         do
27470         {
27471             ++last_card_word;
27472         }
27473         while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word));
27474
27475         if (last_card_word < &card_table [card_word_end])
27476         {
27477             card_word_value = *last_card_word;
27478         }
27479         else
27480         {
27481             // We failed to find any non-zero card words before we got to card_word_end
27482             return FALSE;
27483         }
27484 #endif //CARD_BUNDLE
27485     }
27486
27487     // Look for the lowest bit set
27488     if (card_word_value)
27489     {
27490         while (!(card_word_value & 1))
27491         {
27492             bit_position++;
27493             card_word_value = card_word_value / 2;
27494         }
27495     }
27496     
27497     // card is the card word index * card size + the bit index within the card
27498     card = (last_card_word - &card_table[0]) * card_word_width + bit_position;
27499
27500     do
27501     {
27502         // Keep going until we get to an un-set card.
27503         bit_position++;
27504         card_word_value = card_word_value / 2;
27505
27506         // If we reach the end of the card word and haven't hit a 0 yet, start going
27507         // card word by card word until we get to one that's not fully set (0xFFFF...)
27508         // or we reach card_word_end.
27509         if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end]))
27510         {
27511             do
27512             {
27513                 card_word_value = *(++last_card_word);
27514             } while ((last_card_word < &card_table [card_word_end]) &&
27515
27516 #ifdef _MSC_VER
27517                      (card_word_value == (1 << card_word_width)-1)
27518 #else
27519                      // if left shift count >= width of type,
27520                      // gcc reports error.
27521                      (card_word_value == ~0u)
27522 #endif // _MSC_VER
27523                 );
27524             bit_position = 0;
27525         }
27526     } while (card_word_value & 1);
27527
27528     end_card = (last_card_word - &card_table [0])* card_word_width + bit_position;
27529     
27530     //dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card));
27531     dprintf (3, ("fc: [%Ix, %Ix[", card, end_card));
27532     return TRUE;
27533 }
27534
27535
27536 // Because of heap expansion, computing the end is complicated.
27537 uint8_t* compute_next_end (heap_segment* seg, uint8_t* low)
27538 {
27539     if ((low >=  heap_segment_mem (seg)) &&
27540         (low < heap_segment_allocated (seg)))
27541         return low;
27542     else
27543         return heap_segment_allocated (seg);
27544 }
27545
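// Returns the boundary the card scan uses to decide which pointers are interesting for
// generation gen_number: the start of the next younger generation. When relocating and
// gen_number is just above the condemned generation, the plan allocation start is used
// instead because that generation is being promoted.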
27546 uint8_t*
27547 gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
27548                                 BOOL relocating)
27549 {
27550     UNREFERENCED_PARAMETER(low);
27551
27552     //when relocating, the fault line is the plan start of the younger
27553     //generation because the generation is promoted.
27554     if (relocating && (gen_number == (settings.condemned_generation + 1)))
27555     {
27556         generation* gen = generation_of (gen_number - 1);
27557         uint8_t* gen_alloc = generation_plan_allocation_start (gen);
27558         assert (gen_alloc);
27559         return gen_alloc;
27560     }
27561     else
27562     {
27563         assert (gen_number > settings.condemned_generation);
27564         return generation_allocation_start (generation_of (gen_number - 1 ));
27565     }
27566
27567 }
27568
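// Keeps the card covering this slot live: cg_pointers_found is always incremented, and
// n_gen is bumped when o points into the condemned range ([gc_low, gc_high)) of the
// heap that owns it.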
27569 inline void
27570 gc_heap::keep_card_live (uint8_t* o, size_t& n_gen,
27571                          size_t& cg_pointers_found)
27572 {
27573     THREAD_FROM_HEAP;
27574     if ((gc_low <= o) && (gc_high > o))
27575     {
27576         n_gen++;
27577     }
27578 #ifdef MULTIPLE_HEAPS
27579     else if (o)
27580     {
27581         gc_heap* hp = heap_of (o);
27582         if (hp != this)
27583         {
27584             if ((hp->gc_low <= o) &&
27585                 (hp->gc_high > o))
27586             {
27587                 n_gen++;
27588             }
27589         }
27590     }
27591 #endif //MULTIPLE_HEAPS
27592     cg_pointers_found ++;
27593     dprintf (4, ("keep card live for %Ix", o));
27594 }
27595
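// Examines one slot (poo) found while scanning a set card. If *poo points into the
// condemned range of this heap (or, with MULTIPLE_HEAPS, of the heap that owns it),
// n_gen is bumped and fn is applied to the slot. The pointer is also counted in
// cg_pointers_found when it falls in [next_boundary, nhigh) (or, for the cross heap
// case, when we are relocating or it points into that heap's ephemeral range), which is
// what keeps the covering card set.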
27596 inline void
27597 gc_heap::mark_through_cards_helper (uint8_t** poo, size_t& n_gen,
27598                                     size_t& cg_pointers_found,
27599                                     card_fn fn, uint8_t* nhigh,
27600                                     uint8_t* next_boundary)
27601 {
27602     THREAD_FROM_HEAP;
27603     if ((gc_low <= *poo) && (gc_high > *poo))
27604     {
27605         n_gen++;
27606         call_fn(fn) (poo THREAD_NUMBER_ARG);
27607     }
27608 #ifdef MULTIPLE_HEAPS
27609     else if (*poo)
27610     {
27611         gc_heap* hp = heap_of_gc (*poo);
27612         if (hp != this)
27613         {
27614             if ((hp->gc_low <= *poo) &&
27615                 (hp->gc_high > *poo))
27616             {
27617                 n_gen++;
27618                 call_fn(fn) (poo THREAD_NUMBER_ARG);
27619             }
27620             if ((fn == &gc_heap::relocate_address) ||
27621                 ((hp->ephemeral_low <= *poo) &&
27622                  (hp->ephemeral_high > *poo)))
27623             {
27624                 cg_pointers_found++;
27625             }
27626         }
27627     }
27628 #endif //MULTIPLE_HEAPS
27629     if ((next_boundary <= *poo) && (nhigh > *poo))
27630     {
27631         cg_pointers_found ++;
27632         dprintf (4, ("cg pointer %Ix found, %Id so far",
27633                      (size_t)*poo, cg_pointers_found ));
27634
27635     }
27636 }
27637
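// Called when the scan position (po) has moved past the current card. If the cards just
// scanned contained no cross-generation pointers they are cleared; the counters are
// folded into n_eph and 'card' advances to card_of (po). If that lands at or beyond
// end_card, the next run of set cards is located with find_card and start_address /
// limit are recomputed; TRUE is returned in that case ("passed the end card"),
// FALSE otherwise.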
27638 BOOL gc_heap::card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
27639                                size_t& cg_pointers_found, 
27640                                size_t& n_eph, size_t& n_card_set,
27641                                size_t& card, size_t& end_card,
27642                                BOOL& foundp, uint8_t*& start_address,
27643                                uint8_t*& limit, size_t& n_cards_cleared)
27644 {
27645     dprintf (3, ("pointer %Ix past card %Ix", (size_t)po, (size_t)card));
27646     dprintf (3, ("ct: %Id cg", cg_pointers_found));
27647     BOOL passed_end_card_p = FALSE;
27648     foundp = FALSE;
27649
27650     if (cg_pointers_found == 0)
27651     {
27652         //dprintf(3,(" Clearing cards [%Ix, %Ix[ ",
27653         dprintf(3,(" CC [%Ix, %Ix[ ",
27654                 (size_t)card_address(card), (size_t)po));
27655         clear_cards (card, card_of(po));
27656         n_card_set -= (card_of (po) - card);
27657         n_cards_cleared += (card_of (po) - card);
27658
27659     }
27660     n_eph +=cg_pointers_found;
27661     cg_pointers_found = 0;
27662     card = card_of (po);
27663     if (card >= end_card)
27664     {
27665         passed_end_card_p = TRUE;
27666         dprintf (3, ("card %Ix exceeding end_card %Ix",
27667                     (size_t)card, (size_t)end_card));
27668         foundp = find_card (card_table, card, card_word_end, end_card);
27669         if (foundp)
27670         {
27671             n_card_set+= end_card - card;
27672             start_address = card_address (card);
27673             dprintf (3, ("NewC: %Ix, start: %Ix, end: %Ix",
27674                         (size_t)card, (size_t)start_address,
27675                         (size_t)card_address (end_card)));
27676         }
27677         limit = min (end, card_address (end_card));
27678
27679         assert (!((limit == card_address (end_card))&&
27680                 card_set_p (end_card)));
27681     }
27682
27683     return passed_end_card_p;
27684 }
27685
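// Scans the small object heap segments using the card table: for each run of set cards
// the objects overlapping that range are walked and fn is applied (via
// mark_through_cards_helper) to every slot that points into the condemned ephemeral
// range. Runs of cards that turn out to contain no interesting pointers are cleared so
// they are not rescanned, and, when not relocating, generation_skip_ratio is updated
// from the ratio of useful to cross-generation pointers found.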
27686 void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
27687 {
27688 #ifdef BACKGROUND_GC
27689     dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)",
27690                  current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start));
27691
27692     heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
27693     PREFIX_ASSUME(soh_seg != NULL);
27694
27695     while (soh_seg)
27696     {
27697         dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix", 
27698             soh_seg, 
27699             heap_segment_background_allocated (soh_seg),
27700             heap_segment_allocated (soh_seg)));
27701
27702         soh_seg = heap_segment_next_rw (soh_seg);
27703     }
27704 #endif //BACKGROUND_GC
27705
27706     uint8_t* low = gc_low;
27707     uint8_t* high = gc_high;
27708     size_t end_card = 0;
27709
27710     generation*   oldest_gen        = generation_of (max_generation);
27711     int           curr_gen_number   = max_generation;
27712     uint8_t*      gen_boundary      = generation_allocation_start(generation_of(curr_gen_number - 1));
27713     uint8_t*      next_boundary     = compute_next_boundary(gc_low, curr_gen_number, relocating);
27714     
27715     heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));
27716     PREFIX_ASSUME(seg != NULL);
27717
27718     uint8_t*      beg               = generation_allocation_start (oldest_gen);
27719     uint8_t*      end               = compute_next_end (seg, low);
27720     uint8_t*      last_object       = beg;
27721
27722     size_t  cg_pointers_found = 0;
27723
27724     size_t  card_word_end = (card_of (align_on_card_word (end)) / card_word_width);
27725
27726     size_t        n_eph             = 0;
27727     size_t        n_gen             = 0;
27728     size_t        n_card_set        = 0;
27729     uint8_t*      nhigh             = (relocating ? heap_segment_plan_allocated (ephemeral_heap_segment) : high);
27730
27731     BOOL          foundp            = FALSE;
27732     uint8_t*      start_address     = 0;
27733     uint8_t*      limit             = 0;
27734     size_t        card              = card_of (beg);
27735 #ifdef BACKGROUND_GC
27736     BOOL consider_bgc_mark_p        = FALSE;
27737     BOOL check_current_sweep_p      = FALSE;
27738     BOOL check_saved_sweep_p        = FALSE;
27739     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
27740 #endif //BACKGROUND_GC
27741
27742     dprintf(3, ("CMs: %Ix->%Ix", (size_t)beg, (size_t)end));
27743     size_t total_cards_cleared = 0;
27744
27745     while (1)
27746     {
27747         if (card_of(last_object) > card)
27748         {
27749             // cg means cross-generational
27750             dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
27751             if (cg_pointers_found == 0)
27752             {
27753                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object));
27754                 clear_cards (card, card_of(last_object));
27755                 n_card_set -= (card_of (last_object) - card);
27756                 total_cards_cleared += (card_of (last_object) - card);
27757             }
27758
27759             n_eph += cg_pointers_found;
27760             cg_pointers_found = 0;
27761             card = card_of (last_object);
27762         }
27763
27764         if (card >= end_card)
27765         {
27766             // Find the first card that's set (between card and card_word_end)
27767             foundp = find_card(card_table, card, card_word_end, end_card);
27768             if (foundp)
27769             {
27770                 // We found card(s) set. 
27771                 n_card_set += end_card - card;
27772                 start_address = max (beg, card_address (card));
27773             }
27774
27775             limit = min (end, card_address (end_card));
27776         }
27777
27778         if (!foundp || (last_object >= end) || (card_address (card) >= end))
27779         {
27780             if (foundp && (cg_pointers_found == 0))
27781             {
27782                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
27783                            (size_t)end));
27784                 clear_cards (card, card_of (end));
27785                 n_card_set -= (card_of (end) - card);
27786                 total_cards_cleared += (card_of (end) - card);
27787             }
27788
27789             n_eph += cg_pointers_found;
27790             cg_pointers_found = 0;
27791
27792             if ((seg = heap_segment_next_in_range (seg)) != 0)
27793             {
27794 #ifdef BACKGROUND_GC
27795                 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
27796 #endif //BACKGROUND_GC
27797                 beg = heap_segment_mem (seg);
27798                 end = compute_next_end (seg, low);
27799                 card_word_end = card_of (align_on_card_word (end)) / card_word_width;
27800                 card = card_of (beg);
27801                 last_object = beg;
27802                 end_card = 0;
27803                 continue;
27804             }
27805             else
27806             {
27807                 break;
27808             }
27809         }
27810
27811         // We've found a card and will now go through the objects in it.
27812         assert (card_set_p (card));
27813         {
27814             uint8_t* o = last_object;
27815             o = find_first_object (start_address, last_object);
27816             // Never visit an object twice.
27817             assert (o >= last_object);
27818
27819             //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix",
27820             dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix",
27821                    card, (size_t)o, (size_t)limit, (size_t)gen_boundary));
27822
27823             while (o < limit)
27824             {
27825                 assert (Align (size (o)) >= Align (min_obj_size));
27826                 size_t s = size (o);
27827
27828                 uint8_t* next_o =  o + Align (s);
27829                 Prefetch (next_o);
27830
27831                 if ((o >= gen_boundary) &&
27832                     (seg == ephemeral_heap_segment))
27833                 {
27834                     dprintf (3, ("switching gen boundary %Ix", (size_t)gen_boundary));
27835                     curr_gen_number--;
27836                     assert ((curr_gen_number > 0));
27837                     gen_boundary = generation_allocation_start
27838                         (generation_of (curr_gen_number - 1));
27839                     next_boundary = (compute_next_boundary
27840                                      (low, curr_gen_number, relocating));
27841                 }
27842
27843                 dprintf (4, ("|%Ix|", (size_t)o));
27844
27845                 if (next_o < start_address)
27846                 {
27847                     goto end_object;
27848                 }
27849
27850 #ifdef BACKGROUND_GC
27851                 if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
27852                 {
27853                     goto end_object;
27854                 }
27855 #endif //BACKGROUND_GC
27856
27857 #ifdef COLLECTIBLE_CLASS
27858                 if (is_collectible(o))
27859                 {
27860                     BOOL passed_end_card_p = FALSE;
27861
27862                     if (card_of (o) > card)
27863                     {
27864                         passed_end_card_p = card_transition (o, end, card_word_end,
27865                             cg_pointers_found, 
27866                             n_eph, n_card_set,
27867                             card, end_card,
27868                             foundp, start_address,
27869                             limit, total_cards_cleared);
27870                     }
27871
27872                     if ((!passed_end_card_p || foundp) && (card_of (o) == card))
27873                     {
27874                         // card is valid and it covers the head of the object
27875                         if (fn == &gc_heap::relocate_address)
27876                         {
27877                             keep_card_live (o, n_gen, cg_pointers_found);
27878                         }
27879                         else
27880                         {
27881                             uint8_t* class_obj = get_class_object (o);
27882                             mark_through_cards_helper (&class_obj, n_gen,
27883                                                     cg_pointers_found, fn,
27884                                                     nhigh, next_boundary);
27885                         }
27886                     }
27887
27888                     if (passed_end_card_p)
27889                     {
27890                         if (foundp && (card_address (card) < next_o))
27891                         {
27892                             goto go_through_refs;
27893                         }
27894                         else if (foundp && (start_address < limit))
27895                         {
27896                             next_o = find_first_object (start_address, o);
27897                             goto end_object;
27898                         }
27899                         else
27900                             goto end_limit;                            
27901                     }
27902                 }
27903
27904 go_through_refs:
27905 #endif //COLLECTIBLE_CLASS
27906
27907                 if (contain_pointers (o))
27908                 {
27909                     dprintf(3,("Going through %Ix start_address: %Ix", (size_t)o, (size_t)start_address));
27910
27911                     {
27912                         dprintf (4, ("normal object path"));
27913                         go_through_object
27914                             (method_table(o), o, s, poo,
27915                              start_address, use_start, (o + s),
27916                              {
27917                                  dprintf (4, ("<%Ix>:%Ix", (size_t)poo, (size_t)*poo));
27918                                  if (card_of ((uint8_t*)poo) > card)
27919                                  {
27920                                     BOOL passed_end_card_p  = card_transition ((uint8_t*)poo, end,
27921                                             card_word_end,
27922                                             cg_pointers_found, 
27923                                             n_eph, n_card_set,
27924                                             card, end_card,
27925                                             foundp, start_address,
27926                                             limit, total_cards_cleared);
27927
27928                                      if (passed_end_card_p)
27929                                      {
27930                                         if (foundp && (card_address (card) < next_o))
27931                                         {
27932                                              //new_start();
27933                                              {
27934                                                  if (ppstop <= (uint8_t**)start_address)
27935                                                      {break;}
27936                                                  else if (poo < (uint8_t**)start_address)
27937                                                      {poo = (uint8_t**)start_address;}
27938                                              }
27939                                         }
27940                                         else if (foundp && (start_address < limit))
27941                                         {
27942                                             next_o = find_first_object (start_address, o);
27943                                             goto end_object;
27944                                         }
27945                                          else
27946                                             goto end_limit;
27947                                      }
27948                                  }
27949
27950                                  mark_through_cards_helper (poo, n_gen,
27951                                                             cg_pointers_found, fn,
27952                                                             nhigh, next_boundary);
27953                              }
27954                             );
27955                     }
27956                 }
27957
27958             end_object:
27959                 if (((size_t)next_o / brick_size) != ((size_t) o / brick_size))
27960                 {
27961                     if (brick_table [brick_of (o)] <0)
27962                         fix_brick_to_highest (o, next_o);
27963                 }
27964                 o = next_o;
27965             }
27966         end_limit:
27967             last_object = o;
27968         }
27969     }
27970     // compute the efficiency ratio of the card table
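    // generation_skip_ratio is the percentage of cross-generation pointers that were
    // actually useful, i.e. pointed into the condemned range. For example, n_eph = 1000
    // and n_gen = 250 gives a ratio of 25; with a small sample (n_eph <= 400) the ratio
    // is simply left at 100.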
27971     if (!relocating)
27972     {
27973         generation_skip_ratio = ((n_eph > 400)? (int)(((float)n_gen / (float)n_eph) * 100) : 100);
27974         dprintf (3, ("Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", 
27975             n_eph, n_gen , n_card_set, total_cards_cleared, generation_skip_ratio));
27976     }
27977     else
27978     {
27979         dprintf (3, ("R: Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d", 
27980             n_eph, n_gen, n_card_set, total_cards_cleared, generation_skip_ratio));
27981     }
27982 }
27983
27984 #ifdef SEG_REUSE_STATS
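// Logs a histogram (item counts per power-of-2 bucket, the smallest bucket being
// 2^MIN_INDEX_POWER2) and returns the total number of items; *total_size receives the
// sum of the rounded power-of-2 sizes.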
27985 size_t gc_heap::dump_buckets (size_t* ordered_indices, int count, size_t* total_size)
27986 {
27987     size_t total_items = 0;
27988     *total_size = 0;
27989     for (int i = 0; i < count; i++)
27990     {
27991         total_items += ordered_indices[i];
27992         *total_size += ordered_indices[i] << (MIN_INDEX_POWER2 + i);
27993         dprintf (SEG_REUSE_LOG_0, ("[%d]%4d 2^%2d", heap_number, ordered_indices[i], (MIN_INDEX_POWER2 + i)));
27994     } 
27995     dprintf (SEG_REUSE_LOG_0, ("[%d]Total %d items, total size is 0x%Ix", heap_number, total_items, *total_size));
27996     return total_items;
27997 }
27998 #endif // SEG_REUSE_STATS
27999
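// Accounts for the plug at last_plug (of size last_plug_size) in the plug size
// histogram. If it is the oldest pinned plug it is simply dequeued, since pinned plugs
// stay put and consume no free space; otherwise its size, plus a min object gap and any
// SHORT_PLUGS / RESPECT_LARGE_ALIGNMENT padding, is added to total_ephemeral_plugs and
// counted in the power-of-2 bucket it rounds up to (for example, an adjusted size that
// rounds up to 2^12 lands in bucket 12 - MIN_INDEX_POWER2).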
28000 void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug)
28001 {
28002     // detect pinned plugs
28003     if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
28004     {
28005         deque_pinned_plug();
28006         update_oldest_pinned_plug();
28007         dprintf (3, ("dequeued pin, now oldest pin is %Ix", pinned_plug (oldest_pin())));
28008     }
28009     else
28010     {
28011         size_t plug_size = last_plug_size + Align(min_obj_size);
28012         BOOL is_padded = FALSE;
28013
28014 #ifdef SHORT_PLUGS
28015         plug_size += Align (min_obj_size);
28016         is_padded = TRUE;
28017 #endif //SHORT_PLUGS
28018
28019 #ifdef RESPECT_LARGE_ALIGNMENT
28020         plug_size += switch_alignment_size (is_padded);
28021 #endif //RESPECT_LARGE_ALIGNMENT
28022
28023         total_ephemeral_plugs += plug_size;
28024         size_t plug_size_power2 = round_up_power2 (plug_size);
28025         ordered_plug_indices[relative_index_power2_plug (plug_size_power2)]++;
28026         dprintf (SEG_REUSE_LOG_1, ("[%d]count_plug: adding 0x%Ix - %Id (2^%d) to ordered plug array", 
28027             heap_number, 
28028             last_plug, 
28029             plug_size, 
28030             (relative_index_power2_plug (plug_size_power2) + MIN_INDEX_POWER2)));
28031     }
28032 }
28033
28034 void gc_heap::count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug)
28035 {
28036     assert ((tree != NULL));
28037     if (node_left_child (tree))
28038     {
28039         count_plugs_in_brick (tree + node_left_child (tree), last_plug);
28040     }
28041
28042     if (last_plug != 0)
28043     {
28044         uint8_t*  plug = tree;
28045         size_t gap_size = node_gap_size (plug);
28046         uint8_t*   gap = (plug - gap_size);
28047         uint8_t*  last_plug_end = gap;
28048         size_t  last_plug_size = (last_plug_end - last_plug);
28049         dprintf (3, ("tree: %Ix, last plug: %Ix, gap size: %Ix, gap: %Ix, last plug size: %Ix",
28050             tree, last_plug, gap_size, gap, last_plug_size));
28051
28052         if (tree == oldest_pinned_plug)
28053         {
28054             dprintf (3, ("tree %Ix is pinned, last plug is %Ix, size is %Ix",
28055                 tree, last_plug, last_plug_size));
28056             mark* m = oldest_pin();
28057             if (m->has_pre_plug_info())
28058             {
28059                 last_plug_size += sizeof (gap_reloc_pair);
28060                 dprintf (3, ("pin %Ix has pre plug, adjusting plug size to %Ix", tree, last_plug_size));
28061             }
28062         }
28063         // Can't assert here - if it's a pinned plug it can be less.
28064         //assert (last_plug_size >= Align (min_obj_size));
28065
28066         count_plug (last_plug_size, last_plug);
28067     }
28068
28069     last_plug = tree;
28070
28071     if (node_right_child (tree))
28072     {
28073         count_plugs_in_brick (tree + node_right_child (tree), last_plug);
28074     }
28075 }
28076
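// Builds ordered_plug_indices, the power-of-2 histogram of ephemeral plug sizes, by
// walking the bricks covering the ephemeral range and counting each plug via
// count_plug. One extra entry covering END_SPACE_AFTER_GC plus a min object is added so
// that the next allocation is guaranteed to fit, and a copy is kept in
// saved_ordered_plug_indices so best_fit can be retried without rebuilding the
// histogram.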
28077 void gc_heap::build_ordered_plug_indices ()
28078 {
28079     memset (ordered_plug_indices, 0, sizeof(ordered_plug_indices));
28080     memset (saved_ordered_plug_indices, 0, sizeof(saved_ordered_plug_indices));
28081
28082     uint8_t*  start_address = generation_limit (max_generation);
28083     uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment);
28084     size_t  current_brick = brick_of (start_address);
28085     size_t  end_brick = brick_of (end_address - 1);
28086     uint8_t* last_plug = 0;
28087
28088     //Look for the right pinned plug to start from.
28089     reset_pinned_queue_bos();
28090     while (!pinned_plug_que_empty_p())
28091     {
28092         mark* m = oldest_pin();
28093         if ((m->first >= start_address) && (m->first < end_address))
28094         {
28095             dprintf (3, ("found a pin %Ix between %Ix and %Ix", m->first, start_address, end_address));
28096
28097             break;
28098         }
28099         else
28100             deque_pinned_plug();
28101     }
28102     
28103     update_oldest_pinned_plug();
28104
28105     while (current_brick <= end_brick)
28106     {
28107         int brick_entry =  brick_table [ current_brick ];
28108         if (brick_entry >= 0)
28109         {
28110             count_plugs_in_brick (brick_address (current_brick) + brick_entry -1, last_plug);
28111         }
28112
28113         current_brick++;
28114     }
28115
28116     if (last_plug !=0)
28117     {
28118         count_plug (end_address - last_plug, last_plug);
28119     }
28120
28121     // We need to make sure that after fitting all the existing plugs, we
28122     // have enough free space left to guarantee that the next allocation
28123     // will succeed.
28124     size_t extra_size = END_SPACE_AFTER_GC + Align (min_obj_size);
28125     total_ephemeral_plugs += extra_size;
28126     dprintf (SEG_REUSE_LOG_0, ("Making sure we can fit a large object after fitting all plugs"));
28127     ordered_plug_indices[relative_index_power2_plug (round_up_power2 (extra_size))]++;
28128     
28129     memcpy (saved_ordered_plug_indices, ordered_plug_indices, sizeof(ordered_plug_indices));
28130
28131 #ifdef SEG_REUSE_STATS
28132     dprintf (SEG_REUSE_LOG_0, ("Plugs:"));
28133     size_t total_plug_power2 = 0;
28134     dump_buckets (ordered_plug_indices, MAX_NUM_BUCKETS, &total_plug_power2);
28135     dprintf (SEG_REUSE_LOG_0, ("plugs: 0x%Ix (rounded up to 0x%Ix (%d%%))", 
28136                 total_ephemeral_plugs, 
28137                 total_plug_power2, 
28138                 (total_ephemeral_plugs ? 
28139                     (total_plug_power2 * 100 / total_ephemeral_plugs) :
28140                     0)));
28141     dprintf (SEG_REUSE_LOG_0, ("-------------------"));
28142 #endif // SEG_REUSE_STATS
28143 }
28144
28145 void gc_heap::init_ordered_free_space_indices ()
28146 {
28147     memset (ordered_free_space_indices, 0, sizeof(ordered_free_space_indices));
28148     memset (saved_ordered_free_space_indices, 0, sizeof(saved_ordered_free_space_indices));
28149 }
28150
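// Caps the free space histogram at (max_free_space_items - 1) entries, keeping the
// largest spaces: buckets are accumulated from the largest power of 2 downwards and,
// once the cap is reached, the bucket we stopped in is trimmed (remembered in
// trimmed_free_space_index) and every smaller bucket is zeroed. free_space_buckets and
// free_space_items are updated accordingly and a copy of the result is saved in
// saved_ordered_free_space_indices.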
28151 void gc_heap::trim_free_spaces_indices ()
28152 {
28153     trimmed_free_space_index = -1;
28154     size_t max_count = max_free_space_items - 1;
28155     size_t count = 0;
28156     int i = 0;
28157     for (i = (MAX_NUM_BUCKETS - 1); i >= 0; i--)
28158     {
28159         count += ordered_free_space_indices[i];
28160
28161         if (count >= max_count)
28162         {
28163             break;
28164         }
28165     }
28166
28167     ptrdiff_t extra_free_space_items = count - max_count;
28168
28169     if (extra_free_space_items > 0)
28170     {
28171         ordered_free_space_indices[i] -= extra_free_space_items;
28172         free_space_items = max_count;
28173         trimmed_free_space_index = i;
28174     }
28175     else
28176     {
28177         free_space_items = count;
28178     }
28179
28180     if (i == -1)
28181     {
28182         i = 0;
28183     }
28184
28185     free_space_buckets = MAX_NUM_BUCKETS - i;
28186
28187     for (--i; i >= 0; i--)
28188     {
28189         ordered_free_space_indices[i] = 0;
28190     }
28191
28192     memcpy (saved_ordered_free_space_indices, 
28193             ordered_free_space_indices,
28194             sizeof(ordered_free_space_indices));
28195 }
28196
28197 // We fit as many plugs as we can and update the number of plugs left and the number
28198 // of free spaces left.
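// For example (illustrative), fitting 3 blocks of bucket k into 1 space of bucket k+2:
// the space is worth big_to_small = 1 << 2 = 4 blocks of bucket k, so all 3 blocks fit;
// the leftover capacity (extra_small_spaces = 1) is handed back to the space histogram
// bucket by bucket according to its binary representation, so here ordered_spaces[k]
// gains 1.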
28199 BOOL gc_heap::can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index)
28200 {
28201     assert (small_index <= big_index);
28202     assert (big_index < MAX_NUM_BUCKETS);
28203
28204     size_t small_blocks = ordered_blocks[small_index];
28205
28206     if (small_blocks == 0)
28207     {
28208         return TRUE;
28209     }
28210
28211     size_t big_spaces = ordered_spaces[big_index];
28212
28213     if (big_spaces == 0)
28214     {
28215         return FALSE;
28216     }
28217
28218     dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting %Id 2^%d plugs into %Id 2^%d free spaces", 
28219         heap_number,
28220         small_blocks, (small_index + MIN_INDEX_POWER2),
28221         big_spaces, (big_index + MIN_INDEX_POWER2)));
28222
28223     size_t big_to_small = big_spaces << (big_index - small_index);
28224
28225     ptrdiff_t extra_small_spaces = big_to_small - small_blocks;
28226     dprintf (SEG_REUSE_LOG_1, ("[%d]%d 2^%d spaces can fit %d 2^%d blocks", 
28227         heap_number,
28228         big_spaces, (big_index + MIN_INDEX_POWER2), big_to_small, (small_index + MIN_INDEX_POWER2)));
28229     BOOL can_fit = (extra_small_spaces >= 0);
28230
28231     if (can_fit) 
28232     {
28233         dprintf (SEG_REUSE_LOG_1, ("[%d]Can fit with %d 2^%d extra blocks", 
28234             heap_number,
28235             extra_small_spaces, (small_index + MIN_INDEX_POWER2)));
28236     }
28237
28238     int i = 0;
28239
28240     dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d spaces to 0", heap_number, (big_index + MIN_INDEX_POWER2)));
28241     ordered_spaces[big_index] = 0;
28242     if (extra_small_spaces > 0)
28243     {
28244         dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d blocks to 0", heap_number, (small_index + MIN_INDEX_POWER2)));
28245         ordered_blocks[small_index] = 0;
28246         for (i = small_index; i < big_index; i++)
28247         {
28248             if (extra_small_spaces & 1)
28249             {
28250                 dprintf (SEG_REUSE_LOG_1, ("[%d]Increasing # of 2^%d spaces from %d to %d", 
28251                     heap_number,
28252                     (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + 1)));
28253                 ordered_spaces[i] += 1;
28254             }
28255             extra_small_spaces >>= 1;
28256         }
28257
28258         dprintf (SEG_REUSE_LOG_1, ("[%d]Finally increasing # of 2^%d spaces from %d to %d", 
28259             heap_number,
28260             (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + extra_small_spaces)));
28261         ordered_spaces[i] += extra_small_spaces;
28262     }
28263     else
28264     {
28265         dprintf (SEG_REUSE_LOG_1, ("[%d]Decreasing # of 2^%d blocks from %d to %d", 
28266             heap_number,
28267             (small_index + MIN_INDEX_POWER2), 
28268             ordered_blocks[small_index], 
28269             (ordered_blocks[small_index] - big_to_small)));
28270         ordered_blocks[small_index] -= big_to_small;
28271     }
28272
28273 #ifdef SEG_REUSE_STATS
28274     size_t temp;
28275     dprintf (SEG_REUSE_LOG_1, ("[%d]Plugs became:", heap_number));
28276     dump_buckets (ordered_blocks, MAX_NUM_BUCKETS, &temp);
28277
28278     dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces became:", heap_number));
28279     dump_buckets (ordered_spaces, MAX_NUM_BUCKETS, &temp);
28280 #endif //SEG_REUSE_STATS
28281
28282     return can_fit;
28283 }
28284
28285 // space_index gets updated to the biggest available space index.
28286 BOOL gc_heap::can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index)
28287 {
28288     assert (*space_index >= block_index);
28289
28290     while (!can_fit_in_spaces_p (ordered_blocks, block_index, ordered_spaces, *space_index))
28291     {
28292         (*space_index)--;
28293         if (*space_index < block_index)
28294         {
28295             return FALSE;
28296         }
28297     }
28298
28299     return TRUE;
28300 }
28301
28302 BOOL gc_heap::can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count)
28303 {
28304 #ifdef FEATURE_STRUCTALIGN
28305     // BARTOKTODO (4841): reenable when can_fit_in_spaces_p takes alignment requirements into account
28306     return FALSE;
28307 #endif // FEATURE_STRUCTALIGN
28308     int space_index = count - 1;
28309     for (int block_index = (count - 1); block_index >= 0; block_index--)
28310     {
28311         if (!can_fit_blocks_p (ordered_blocks, block_index, ordered_spaces, &space_index))
28312         {
28313             return FALSE;
28314         }
28315     }
28316
28317     return TRUE;
28318 }
28319
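// Feeds the free spaces recorded for this segment into bestfit_seg: first the
// power-of-2 buckets from ordered_free_space_indices, then each pinned plug gap that
// falls within the segment (the first one used must be big enough to also hold the
// ephemeral generation starts), and finally, when commit_end_of_seg is set, the space
// at the end of the segment.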
28320 void gc_heap::build_ordered_free_spaces (heap_segment* seg)
28321 {
28322     assert (bestfit_seg);
28323
28324     //bestfit_seg->add_buckets (MAX_NUM_BUCKETS - free_space_buckets + MIN_INDEX_POWER2, 
28325     //                    ordered_free_space_indices + (MAX_NUM_BUCKETS - free_space_buckets), 
28326     //                    free_space_buckets, 
28327     //                    free_space_items);
28328
28329     bestfit_seg->add_buckets (MIN_INDEX_POWER2, 
28330                         ordered_free_space_indices, 
28331                         MAX_NUM_BUCKETS, 
28332                         free_space_items);
28333
28334     assert (settings.condemned_generation == max_generation);
28335
28336     uint8_t* first_address = heap_segment_mem (seg);
28337     uint8_t* end_address   = heap_segment_reserved (seg);
28338     //look through the pinned plugs for relevant ones.
28339     //Look for the right pinned plug to start from.
28340     reset_pinned_queue_bos();
28341     mark* m = 0;
28342     // See comment in can_expand_into_p why we need (max_generation + 1).
28343     size_t eph_gen_starts = (Align (min_obj_size)) * (max_generation + 1);
28344     BOOL has_fit_gen_starts = FALSE;
28345
28346     while (!pinned_plug_que_empty_p())
28347     {
28348         m = oldest_pin();
28349         if ((pinned_plug (m) >= first_address) && 
28350             (pinned_plug (m) < end_address) &&
28351             (pinned_len (m) >= eph_gen_starts))
28352         {
28353
28354             assert ((pinned_plug (m) - pinned_len (m)) == bestfit_first_pin);
28355             break;
28356         }
28357         else
28358         {
28359             deque_pinned_plug();
28360         }
28361     }
28362
28363     if (!pinned_plug_que_empty_p())
28364     {
28365         bestfit_seg->add ((void*)m, TRUE, TRUE);
28366         deque_pinned_plug();
28367         m = oldest_pin();
28368         has_fit_gen_starts = TRUE;
28369     }
28370
28371     while (!pinned_plug_que_empty_p() &&
28372             ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
28373     {
28374         bestfit_seg->add ((void*)m, TRUE, FALSE);
28375         deque_pinned_plug();
28376         m = oldest_pin();
28377     }
28378
28379     if (commit_end_of_seg)
28380     {
28381         if (!has_fit_gen_starts)
28382         {
28383             assert (bestfit_first_pin == heap_segment_plan_allocated (seg));
28384         }
28385         bestfit_seg->add ((void*)seg, FALSE, (!has_fit_gen_starts));
28386     }
28387
28388 #ifdef _DEBUG
28389     bestfit_seg->check();
28390 #endif //_DEBUG
28391 }
28392
28393 BOOL gc_heap::try_best_fit (BOOL end_of_segment_p)
28394 {
28395     if (!end_of_segment_p)
28396     {
28397         trim_free_spaces_indices ();
28398     }
28399
28400     BOOL can_bestfit = can_fit_all_blocks_p (ordered_plug_indices, 
28401                                              ordered_free_space_indices, 
28402                                              MAX_NUM_BUCKETS);
28403
28404     return can_bestfit;
28405 }
28406
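// Decides whether the ephemeral plugs can be fitted into the free spaces recorded for
// the segment being considered for reuse. Roughly: build (or restore) the plug size
// histogram, handle the trivial cases (no plugs to realloc, or clearly not enough total
// space), trim the free space histogram to max_free_space_items, then try to fit every
// plug bucket into the free space buckets via try_best_fit, optionally counting the end
// of segment space as one extra free space (in which case *use_additional_space is
// set). Returns TRUE when the ephemeral generations will fit; use_bestfit is set when
// the bucket fitting scheme is what made the fit work. On failure max_free_space_items
// is adjusted for the next attempt.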
28407 BOOL gc_heap::best_fit (size_t free_space, 
28408                         size_t largest_free_space, 
28409                         size_t additional_space, 
28410                         BOOL* use_additional_space)
28411 {
28412     dprintf (SEG_REUSE_LOG_0, ("gen%d: trying best fit mechanism", settings.condemned_generation));
28413
28414     assert (!additional_space || (additional_space && use_additional_space));
28415     if (use_additional_space)
28416     {
28417         *use_additional_space = FALSE;
28418     }
28419
28420     if (ordered_plug_indices_init == FALSE)
28421     {
28422         total_ephemeral_plugs = 0;
28423         build_ordered_plug_indices();
28424         ordered_plug_indices_init = TRUE;
28425     }
28426     else
28427     {
28428         memcpy (ordered_plug_indices, saved_ordered_plug_indices, sizeof(ordered_plug_indices));
28429     }
28430
28431     if (total_ephemeral_plugs == (END_SPACE_AFTER_GC + Align (min_obj_size)))
28432     {
28433         dprintf (SEG_REUSE_LOG_0, ("No ephemeral plugs to realloc, done"));
28434         size_t empty_eph = (END_SPACE_AFTER_GC + Align (min_obj_size) + (Align (min_obj_size)) * (max_generation + 1));
28435         BOOL can_fit_empty_eph = (largest_free_space >= empty_eph);
28436         if (!can_fit_empty_eph)
28437         {
28438             can_fit_empty_eph = (additional_space >= empty_eph);
28439
28440             if (can_fit_empty_eph)
28441             {
28442                 *use_additional_space = TRUE;
28443             }
28444         }
28445
28446         return can_fit_empty_eph;
28447     }
28448
28449     if ((total_ephemeral_plugs + approximate_new_allocation()) >= (free_space + additional_space))
28450     {
28451         dprintf (SEG_REUSE_LOG_0, ("We won't have enough free space left in this segment after fitting, done"));
28452         return FALSE;
28453     }
28454
28455     if ((free_space + additional_space) == 0)
28456     {
28457         dprintf (SEG_REUSE_LOG_0, ("No free space in this segment, done"));
28458         return FALSE;
28459     }
28460
28461 #ifdef SEG_REUSE_STATS
28462     dprintf (SEG_REUSE_LOG_0, ("Free spaces:"));
28463     size_t total_free_space_power2 = 0;
28464     size_t total_free_space_items = 
28465         dump_buckets (ordered_free_space_indices, 
28466                       MAX_NUM_BUCKETS,
28467                       &total_free_space_power2);
28468     dprintf (SEG_REUSE_LOG_0, ("currently max free spaces is %Id", max_free_space_items));
28469
28470     dprintf (SEG_REUSE_LOG_0, ("Ephemeral plugs: 0x%Ix, free space: 0x%Ix (rounded down to 0x%Ix (%Id%%)), additional free_space: 0x%Ix",
28471                 total_ephemeral_plugs, 
28472                 free_space, 
28473                 total_free_space_power2, 
28474                 (free_space ? (total_free_space_power2 * 100 / free_space) : 0),
28475                 additional_space));
28476
28477     size_t saved_all_free_space_indices[MAX_NUM_BUCKETS];
28478     memcpy (saved_all_free_space_indices, 
28479             ordered_free_space_indices, 
28480             sizeof(saved_all_free_space_indices));
28481
28482 #endif // SEG_REUSE_STATS
28483
28484     if (total_ephemeral_plugs > (free_space + additional_space))
28485     {
28486         return FALSE;
28487     }
28488
28489     use_bestfit = try_best_fit(FALSE);
28490
28491     if (!use_bestfit && additional_space)
28492     {
28493         int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (additional_space));
28494
28495         if (relative_free_space_index != -1)
28496         {
28497             int relative_plug_index = 0;
28498             size_t plugs_to_fit = 0;
28499
28500             for (relative_plug_index = (MAX_NUM_BUCKETS - 1); relative_plug_index >= 0; relative_plug_index--)
28501             {
28502                 plugs_to_fit = ordered_plug_indices[relative_plug_index];
28503                 if (plugs_to_fit != 0)
28504                 {
28505                     break;
28506                 }
28507             }
28508
28509             if ((relative_plug_index > relative_free_space_index) ||
28510                 ((relative_plug_index == relative_free_space_index) &&
28511                 (plugs_to_fit > 1)))
28512             {
28513 #ifdef SEG_REUSE_STATS
28514                 dprintf (SEG_REUSE_LOG_0, ("additional space is 2^%d but we stopped at %d 2^%d plug(s)",
28515                             (relative_free_space_index + MIN_INDEX_POWER2),
28516                             plugs_to_fit,
28517                             (relative_plug_index + MIN_INDEX_POWER2)));
28518 #endif // SEG_REUSE_STATS
28519                 goto adjust;
28520             }
28521             
28522             dprintf (SEG_REUSE_LOG_0, ("Adding end of segment (2^%d)", (relative_free_space_index + MIN_INDEX_POWER2)));
28523             ordered_free_space_indices[relative_free_space_index]++;
28524             use_bestfit = try_best_fit(TRUE);
28525             if (use_bestfit)
28526             {
28527                 free_space_items++;
28528                 // Since we might've trimmed away some of the free spaces we had, we should see
28529                 // if we really need to use end of seg space - if it's the same or smaller than
28530                 // the largest space we trimmed we can just add that one back instead of 
28531                 // using end of seg.
28532                 if (relative_free_space_index > trimmed_free_space_index)
28533                 {
28534                     *use_additional_space = TRUE;
28535                 }
28536                 else 
28537                 {
28538                     // If the additional space is no larger than the last trimmed space, we
28539                     // should just use that last trimmed space instead.
28540                     saved_ordered_free_space_indices[trimmed_free_space_index]++;
28541                 }
28542             }
28543         }
28544     }
28545
28546 adjust:
28547
28548     if (!use_bestfit)
28549     {
28550         dprintf (SEG_REUSE_LOG_0, ("couldn't fit..."));
28551
28552 #ifdef SEG_REUSE_STATS
28553         size_t saved_max = max_free_space_items;
28554         BOOL temp_bestfit = FALSE;
28555
28556         dprintf (SEG_REUSE_LOG_0, ("----Starting experiment process----"));
28557         dprintf (SEG_REUSE_LOG_0, ("----Couldn't fit with max free items %Id", max_free_space_items));
28558
28559         // TODO: need to take the end of segment into consideration.
28560         while (max_free_space_items <= total_free_space_items)
28561         {
28562             max_free_space_items += max_free_space_items / 2;
28563             dprintf (SEG_REUSE_LOG_0, ("----Temporarily increasing max free spaces to %Id", max_free_space_items));
28564             memcpy (ordered_free_space_indices, 
28565                     saved_all_free_space_indices,
28566                     sizeof(ordered_free_space_indices));
28567             if (try_best_fit(FALSE))
28568             {
28569                 temp_bestfit = TRUE;
28570                 break;
28571             }
28572         }
28573
28574         if (temp_bestfit)
28575         {
28576             dprintf (SEG_REUSE_LOG_0, ("----With %Id max free spaces we could fit", max_free_space_items));
28577         }
28578         else
28579         {
28580             dprintf (SEG_REUSE_LOG_0, ("----Tried all free spaces and still couldn't fit, lost too much space"));
28581         }
28582
28583         dprintf (SEG_REUSE_LOG_0, ("----Restoring max free spaces to %Id", saved_max));
28584         max_free_space_items = saved_max;
28585 #endif // SEG_REUSE_STATS
28586         if (free_space_items)
28587         {
28588             max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2);
28589             max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES);
28590         }
28591         else
28592         {
28593             max_free_space_items = MAX_NUM_FREE_SPACES;
28594         }
28595     }
28596
28597     dprintf (SEG_REUSE_LOG_0, ("Adjusted number of max free spaces to %Id", max_free_space_items));
28598     dprintf (SEG_REUSE_LOG_0, ("------End of best fitting process------\n"));
28599
28600     return use_bestfit;
28601 }
28602
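// Accumulates one candidate free space for the segment under consideration. Returns
// TRUE as soon as the running totals satisfy both min_free_size and min_cont_size,
// meaning the segment can be reused without bestfit; otherwise the space is recorded in
// the power-of-2 free space histogram for a possible best_fit pass and FALSE is
// returned.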
28603 BOOL gc_heap::process_free_space (heap_segment* seg, 
28604                          size_t free_space,
28605                          size_t min_free_size, 
28606                          size_t min_cont_size,
28607                          size_t* total_free_space,
28608                          size_t* largest_free_space)
28609 {
28610     *total_free_space += free_space;
28611     *largest_free_space = max (*largest_free_space, free_space);
28612
28613 #ifdef SIMPLE_DPRINTF
28614     dprintf (SEG_REUSE_LOG_1, ("free space len: %Ix, total free space: %Ix, largest free space: %Ix", 
28615                 free_space, *total_free_space, *largest_free_space));
28616 #endif //SIMPLE_DPRINTF
28617
28618     if ((*total_free_space >= min_free_size) && (*largest_free_space >= min_cont_size))
28619     {
28620 #ifdef SIMPLE_DPRINTF
28621         dprintf (SEG_REUSE_LOG_0, ("(gen%d)total free: %Ix(min: %Ix), largest free: %Ix(min: %Ix). Found segment %Ix to reuse without bestfit", 
28622             settings.condemned_generation,
28623             *total_free_space, min_free_size, *largest_free_space, min_cont_size,
28624             (size_t)seg));
28625 #else
28626         UNREFERENCED_PARAMETER(seg);
28627 #endif //SIMPLE_DPRINTF
28628         return TRUE;
28629     }
28630
28631     int free_space_index = relative_index_power2_free_space (round_down_power2 (free_space));
28632     if (free_space_index != -1)
28633     {
28634         ordered_free_space_indices[free_space_index]++;
28635     }
28636     return FALSE;
28637 }
28638
28639 BOOL gc_heap::expand_reused_seg_p()
28640 {
28641     BOOL reused_seg = FALSE;
28642     int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand);
28643     if ((heap_expand_mechanism == expand_reuse_bestfit) || 
28644         (heap_expand_mechanism == expand_reuse_normal))
28645     {
28646         reused_seg = TRUE;
28647     }
28648
28649     return reused_seg;
28650 }
28651
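// Decides whether the ephemeral generations can be moved into segment 'seg' as part of
// a compacting GC. When gen2 is condemned the candidate free spaces are the gaps in
// front of the pinned plugs in the segment, plus possibly space at the end of the
// segment (which gets committed here); when gen1 is condemned they are the space at the
// end of the segment plus the free list entries of the passed-in allocator that lie
// within it. Returns TRUE when min_free_size / min_cont_size can be satisfied, either
// directly or (for the gen2 case) via best_fit; use_bestfit, commit_end_of_seg and
// bestfit_first_pin are set up for the reuse path.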
28652 BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size,
28653                                  allocator* gen_allocator)
28654 {
28655     min_cont_size += END_SPACE_AFTER_GC;
28656     use_bestfit = FALSE;
28657     commit_end_of_seg = FALSE;
28658     bestfit_first_pin = 0;
28659     uint8_t* first_address = heap_segment_mem (seg);
28660     uint8_t* end_address   = heap_segment_reserved (seg);
28661     size_t end_extra_space = end_space_after_gc();
28662
28663     if ((heap_segment_reserved (seg) - end_extra_space) <= heap_segment_plan_allocated (seg))
28664     {
28665         dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: can't use segment [%Ix %Ix, has less than %d bytes at the end",
28666                                    first_address, end_address, end_extra_space));
28667         return FALSE;
28668     }
28669
28670     end_address -= end_extra_space;
28671
28672     dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p(gen%d): min free: %Ix, min continuous: %Ix", 
28673         settings.condemned_generation, min_free_size, min_cont_size));
28674     size_t eph_gen_starts = eph_gen_starts_size;
28675
28676     if (settings.condemned_generation == max_generation)
28677     {
28678         size_t free_space = 0;
28679         size_t largest_free_space = free_space;
28680         dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen2: testing segment [%Ix %Ix", first_address, end_address));
28681         //Look through the pinned plugs for relevant ones and look for the right pinned plug to start from.
28682         //We are going to allocate the generation starts in the 1st free space,
28683         //so start from the first free space that's big enough for gen starts and a min object size.
28684         // If we see a free space that is >= gen starts but < gen starts + min obj size we just don't use it - 
28685         // we could use it by allocating the last generation start a bit bigger but 
28686         // the complexity isn't worth the effort (those plugs are from gen2 
28687         // already anyway).
28688         reset_pinned_queue_bos();
28689         mark* m = 0;
28690         BOOL has_fit_gen_starts = FALSE;
28691
28692         init_ordered_free_space_indices ();
28693         while (!pinned_plug_que_empty_p())
28694         {
28695             m = oldest_pin();
28696             if ((pinned_plug (m) >= first_address) && 
28697                 (pinned_plug (m) < end_address) &&
28698                 (pinned_len (m) >= (eph_gen_starts + Align (min_obj_size))))
28699             {
28700                 break;
28701             }
28702             else
28703             {
28704                 deque_pinned_plug();
28705             }
28706         }
28707
28708         if (!pinned_plug_que_empty_p())
28709         {
28710             bestfit_first_pin = pinned_plug (m) - pinned_len (m);
28711
28712             if (process_free_space (seg, 
28713                                     pinned_len (m) - eph_gen_starts, 
28714                                     min_free_size, min_cont_size, 
28715                                     &free_space, &largest_free_space))
28716             {
28717                 return TRUE;
28718             }
28719
28720             deque_pinned_plug();
28721             m = oldest_pin();
28722             has_fit_gen_starts = TRUE;
28723         }
28724
28725         dprintf (3, ("first pin is %Ix", pinned_plug (m)));
28726
28727         //tally up free space
28728         while (!pinned_plug_que_empty_p() &&
28729                ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
28730         {
28731             dprintf (3, ("looking at pin %Ix", pinned_plug (m)));
28732             if (process_free_space (seg, 
28733                                     pinned_len (m), 
28734                                     min_free_size, min_cont_size, 
28735                                     &free_space, &largest_free_space))
28736             {
28737                 return TRUE;
28738             }
28739
28740             deque_pinned_plug();
28741             m = oldest_pin();
28742         }
28743
28744         //try to find space at the end of the segment. 
28745         size_t end_space = (end_address - heap_segment_plan_allocated (seg)); 
28746         size_t additional_space = ((min_free_size > free_space) ? (min_free_size - free_space) : 0); 
28747         dprintf (SEG_REUSE_LOG_0, ("end space: %Ix; additional: %Ix", end_space, additional_space));
28748         if (end_space >= additional_space)
28749         {
28750             BOOL can_fit = TRUE;
28751             commit_end_of_seg = TRUE;
28752
28753             if (largest_free_space < min_cont_size)
28754             {
28755                 if (end_space >= min_cont_size)
28756                 {
28757                     additional_space = max (min_cont_size, additional_space);
28758                     dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg for eph", 
28759                         seg));
28760                 }
28761                 else 
28762                 {
28763                     if (settings.concurrent)
28764                     {
28765                         can_fit = FALSE;
28766                         commit_end_of_seg = FALSE;
28767                     }
28768                     else
28769                     {
28770                         size_t additional_space_bestfit = additional_space;
28771                         if (!has_fit_gen_starts)
28772                         {
28773                             if (additional_space_bestfit < (eph_gen_starts + Align (min_obj_size)))
28774                             {
28775                                 dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, gen starts not allocated yet and end space is too small: %Id",
28776                                         additional_space_bestfit));
28777                                 return FALSE;
28778                             }
28779
28780                             bestfit_first_pin = heap_segment_plan_allocated (seg);
28781                             additional_space_bestfit -= eph_gen_starts;
28782                         }
28783
28784                         can_fit = best_fit (free_space, 
28785                                             largest_free_space,
28786                                             additional_space_bestfit, 
28787                                             &commit_end_of_seg);
28788
28789                         if (can_fit)
28790                         {
28791                             dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse with bestfit, %s committing end of seg", 
28792                                 seg, (commit_end_of_seg ? "with" : "without")));
28793                         }
28794                         else
28795                         {
28796                             dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
28797                         }
28798                     }
28799                 }
28800             }
28801             else
28802             {
28803                 dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg", seg));
28804             }
28805
28806             assert (additional_space <= end_space);
28807             if (commit_end_of_seg)
28808             {
28809                 if (!grow_heap_segment (seg, heap_segment_plan_allocated (seg) + additional_space))
28810                 {
28811                     dprintf (2, ("Couldn't commit end of segment?!"));
28812                     use_bestfit = FALSE;
28813  
28814                     return FALSE;
28815                 }
28816
28817                 if (use_bestfit)
28818                 {
28819                     // We increase the index here because growing the heap segment could create a discrepancy with 
28820                     // the additional space we used (the committed amount could be bigger).
28821                     size_t free_space_end_of_seg = 
28822                         heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
28823                     int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (free_space_end_of_seg));
28824                     saved_ordered_free_space_indices[relative_free_space_index]++;
28825                 }
28826             }
28827         
28828             if (use_bestfit)
28829             {
28830                 memcpy (ordered_free_space_indices, 
28831                         saved_ordered_free_space_indices, 
28832                         sizeof(ordered_free_space_indices));
28833                 max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2);
28834                 max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items);
28835                 dprintf (SEG_REUSE_LOG_0, ("could fit! %Id free spaces, %Id max", free_space_items, max_free_space_items));
28836             }
28837
28838             return can_fit;
28839         }
28840
28841         dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
28842         return FALSE;
28843     }
28844     else
28845     {
28846         assert (settings.condemned_generation == (max_generation-1));
28847         size_t free_space = (end_address - heap_segment_plan_allocated (seg));
28848         size_t largest_free_space = free_space;
28849         dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen1: testing segment [%Ix %Ix", first_address, end_address));
28850         //find the first free list in range of the current segment
28851         size_t sz_list = gen_allocator->first_bucket_size();
28852         unsigned int a_l_idx = 0;
28853         uint8_t* free_list = 0;
28854         for (; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
28855         {
28856             if ((eph_gen_starts <= sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
28857             {
28858                 free_list = gen_allocator->alloc_list_head_of (a_l_idx);
28859                 while (free_list)
28860                 {
28861                     if ((free_list >= first_address) && 
28862                         (free_list < end_address) && 
28863                         (unused_array_size (free_list) >= eph_gen_starts))
28864                     {
28865                         goto next;
28866                     }
28867                     else
28868                     {
28869                         free_list = free_list_slot (free_list);
28870                     }
28871                 }
28872             }
28873         }
28874 next:
28875         if (free_list)
28876         {
28877             init_ordered_free_space_indices ();
28878             if (process_free_space (seg, 
28879                                     unused_array_size (free_list) - eph_gen_starts + Align (min_obj_size), 
28880                                     min_free_size, min_cont_size, 
28881                                     &free_space, &largest_free_space))
28882             {
28883                 return TRUE;
28884             }
28885
28886             free_list = free_list_slot (free_list);
28887         }
28888         else
28889         {
28890             dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, no free list"));
28891             return FALSE;
28892         }
28893
        //tally up free space
28895
28896         while (1)
28897         {
28898             while (free_list)
28899             {
28900                 if ((free_list >= first_address) && (free_list < end_address) &&
28901                     process_free_space (seg, 
28902                                         unused_array_size (free_list), 
28903                                         min_free_size, min_cont_size, 
28904                                         &free_space, &largest_free_space))
28905                 {
28906                     return TRUE;
28907                 }
28908
28909                 free_list = free_list_slot (free_list);
28910             }
28911             a_l_idx++;
28912             if (a_l_idx < gen_allocator->number_of_buckets())
28913             {
28914                 free_list = gen_allocator->alloc_list_head_of (a_l_idx);
28915             }
28916             else
28917                 break;
28918         } 
28919
28920         dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
28921         return FALSE;
28922
28923         /*
28924         BOOL can_fit = best_fit (free_space, 0, NULL);
28925         if (can_fit)
28926         {
28927             dprintf (SEG_REUSE_LOG_0, ("(gen1)Found segment %Ix to reuse with bestfit", seg));
28928         }
28929         else
28930         {
28931             dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
28932         }
28933
28934         return can_fit;
28935         */
28936     }
28937 }
28938
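// Re-plans a single plug during heap expansion: walks active_new_gen_number down when the
// plug crosses a planned generation boundary, dequeues pinned plugs (updating the pinned gap
// and setting cards across them), and otherwise reallocates the plug into the expanded heap
// via allocate_in_expanded_heap, recording the relocation distance on the plug's node.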
28939 void gc_heap::realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
28940                             generation* gen, uint8_t* start_address,
28941                             unsigned int& active_new_gen_number,
28942                             uint8_t*& last_pinned_gap, BOOL& leftp,
28943                             BOOL shortened_p
28944 #ifdef SHORT_PLUGS
28945                             , mark* pinned_plug_entry
28946 #endif //SHORT_PLUGS
28947                             )
28948 {
    // detect generation boundaries
    // Make sure that active_new_gen_number is not the youngest generation,
    // because generation_limit wouldn't return the right thing in that case.
28952     if (!use_bestfit)
28953     {
28954         if ((active_new_gen_number > 1) &&
28955             (last_plug >= generation_limit (active_new_gen_number)))
28956         {
28957             assert (last_plug >= start_address);
28958             active_new_gen_number--;
28959             realloc_plan_generation_start (generation_of (active_new_gen_number), gen);
28960             assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
28961             leftp = FALSE;
28962         }
28963     }
28964
28965     // detect pinned plugs
28966     if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
28967     {
28968         size_t  entry = deque_pinned_plug();
28969         mark*  m = pinned_plug_of (entry);
28970
28971         size_t saved_pinned_len = pinned_len(m);
28972         pinned_len(m) = last_plug - last_pinned_gap;
28973         //dprintf (3,("Adjusting pinned gap: [%Ix, %Ix[", (size_t)last_pinned_gap, (size_t)last_plug));
28974
28975         if (m->has_post_plug_info())
28976         {
28977             last_plug_size += sizeof (gap_reloc_pair);
            dprintf (3, ("ra pinned %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size));
28979         }
28980
28981         last_pinned_gap = last_plug + last_plug_size;
28982         dprintf (3, ("ra found pin %Ix, len: %Ix->%Ix, last_p: %Ix, last_p_size: %Ix",
28983             pinned_plug (m), saved_pinned_len, pinned_len (m), last_plug, last_plug_size));
28984         leftp = FALSE;
28985
28986         //we are creating a generation fault. set the cards.
28987         {
28988             size_t end_card = card_of (align_on_card (last_plug + last_plug_size));
28989             size_t card = card_of (last_plug);
28990             while (card != end_card)
28991             {
28992                 set_card (card);
28993                 card++;
28994             }
28995         }
28996     }
28997     else if (last_plug >= start_address)
28998     {
28999 #ifdef FEATURE_STRUCTALIGN
29000         int requiredAlignment;
29001         ptrdiff_t pad;
29002         node_aligninfo (last_plug, requiredAlignment, pad);
29003
29004         // from how we previously aligned the plug's destination address,
29005         // compute the actual alignment offset.
29006         uint8_t* reloc_plug = last_plug + node_relocation_distance (last_plug);
29007         ptrdiff_t alignmentOffset = ComputeStructAlignPad(reloc_plug, requiredAlignment, 0);
29008         if (!alignmentOffset)
29009         {
29010             // allocate_in_expanded_heap doesn't expect alignmentOffset to be zero.
29011             alignmentOffset = requiredAlignment;
29012         }
29013
29014         //clear the alignment info because we are reallocating
29015         clear_node_aligninfo (last_plug);
29016 #else // FEATURE_STRUCTALIGN
29017         //clear the realignment flag because we are reallocating
29018         clear_node_realigned (last_plug);
29019 #endif // FEATURE_STRUCTALIGN
29020         BOOL adjacentp = FALSE;
29021         BOOL set_padding_on_saved_p = FALSE;
29022
29023         if (shortened_p)
29024         {
29025             last_plug_size += sizeof (gap_reloc_pair);
29026
29027 #ifdef SHORT_PLUGS
29028             assert (pinned_plug_entry != NULL);
29029             if (last_plug_size <= sizeof (plug_and_gap))
29030             {
29031                 set_padding_on_saved_p = TRUE;
29032             }
29033 #endif //SHORT_PLUGS
29034
            dprintf (3, ("ra plug %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size));
29036         }
29037
29038 #ifdef SHORT_PLUGS
29039         clear_padding_in_expand (last_plug, set_padding_on_saved_p, pinned_plug_entry);
29040 #endif //SHORT_PLUGS
29041
29042         uint8_t* new_address = allocate_in_expanded_heap(gen, last_plug_size, adjacentp, last_plug,
29043 #ifdef SHORT_PLUGS
29044                                      set_padding_on_saved_p,
29045                                      pinned_plug_entry,
29046 #endif //SHORT_PLUGS
29047                                      TRUE, active_new_gen_number REQD_ALIGN_AND_OFFSET_ARG);
29048
29049         dprintf (3, ("ra NA: [%Ix, %Ix[: %Ix", new_address, (new_address + last_plug_size), last_plug_size));
29050         assert (new_address);
29051         set_node_relocation_distance (last_plug, new_address - last_plug);
29052 #ifdef FEATURE_STRUCTALIGN
29053         if (leftp && node_alignpad (last_plug) == 0)
29054 #else // FEATURE_STRUCTALIGN
29055         if (leftp && !node_realigned (last_plug))
29056 #endif // FEATURE_STRUCTALIGN
29057         {
            // TODO - the "left" optimization is temporarily disabled because of a bug in it.
29059             //set_node_left (last_plug);
29060         }
29061         dprintf (3,(" Re-allocating %Ix->%Ix len %Id", (size_t)last_plug, (size_t)new_address, last_plug_size));
29062         leftp = adjacentp;
29063     }
29064 }
29065
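// Walks the plug tree rooted in a brick in order (left subtree, node, right subtree). For
// each node, the size of the previous plug is derived from the gap stored in front of the
// current one and the previous plug is handed to realloc_plug; the current node then becomes
// last_plug for the next step.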
29066 void gc_heap::realloc_in_brick (uint8_t* tree, uint8_t*& last_plug,
29067                                 uint8_t* start_address,
29068                                 generation* gen,
29069                                 unsigned int& active_new_gen_number,
29070                                 uint8_t*& last_pinned_gap, BOOL& leftp)
29071 {
29072     assert (tree != NULL);
29073     int   left_node = node_left_child (tree);
29074     int   right_node = node_right_child (tree);
29075
29076     dprintf (3, ("ra: tree: %Ix, last_pin_gap: %Ix, last_p: %Ix, L: %d, R: %d", 
29077         tree, last_pinned_gap, last_plug, left_node, right_node));
29078
29079     if (left_node)
29080     {
29081         dprintf (3, ("LN: realloc %Ix(%Ix)", (tree + left_node), last_plug));
29082         realloc_in_brick ((tree + left_node), last_plug, start_address,
29083                           gen, active_new_gen_number, last_pinned_gap,
29084                           leftp);
29085     }
29086
29087     if (last_plug != 0)
29088     {
29089         uint8_t*  plug = tree;
29090
29091         BOOL has_pre_plug_info_p = FALSE;
29092         BOOL has_post_plug_info_p = FALSE;
29093         mark* pinned_plug_entry = get_next_pinned_entry (tree, 
29094                                                          &has_pre_plug_info_p,
29095                                                          &has_post_plug_info_p, 
29096                                                          FALSE);
29097
29098         // We only care about the pre plug info 'cause that's what decides if the last plug is shortened.
29099         // The pinned plugs are handled in realloc_plug.
29100         size_t gap_size = node_gap_size (plug);
29101         uint8_t*   gap = (plug - gap_size);
29102         uint8_t*  last_plug_end = gap;
29103         size_t  last_plug_size = (last_plug_end - last_plug);
29104         // Cannot assert this - a plug could be less than that due to the shortened ones.
29105         //assert (last_plug_size >= Align (min_obj_size));
29106         dprintf (3, ("ra: plug %Ix, gap size: %Ix, last_pin_gap: %Ix, last_p: %Ix, last_p_end: %Ix, shortened: %d",
29107             plug, gap_size, last_pinned_gap, last_plug, last_plug_end, (has_pre_plug_info_p ? 1 : 0)));
29108         realloc_plug (last_plug_size, last_plug, gen, start_address,
29109                       active_new_gen_number, last_pinned_gap,
29110                       leftp, has_pre_plug_info_p
29111 #ifdef SHORT_PLUGS
29112                       , pinned_plug_entry
29113 #endif //SHORT_PLUGS
29114                       );
29115     }
29116
29117     last_plug = tree;
29118
29119     if (right_node)
29120     {
29121         dprintf (3, ("RN: realloc %Ix(%Ix)", (tree + right_node), last_plug));
29122         realloc_in_brick ((tree + right_node), last_plug, start_address,
29123                           gen, active_new_gen_number, last_pinned_gap,
29124                           leftp);
29125     }
29126 }
29127
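// Re-plans every plug on the old ephemeral segment that lies at or beyond its planned end.
// When use_bestfit is set, every ephemeral generation is first given a planned allocation
// start. Pinned plugs below the planned end are skipped, the brick table is walked from the
// first interesting address to end_address via realloc_in_brick, the trailing plug is handled
// separately, and the old segment's plan-allocated is finally set to the last pinned gap.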
29128 void
29129 gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg,
29130                         uint8_t* start_address, uint8_t* end_address,
29131                         unsigned active_new_gen_number)
29132 {
29133     dprintf (3, ("--- Reallocing ---"));
29134
29135     if (use_bestfit)
29136     {
29137         //make sure that every generation has a planned allocation start
29138         int  gen_number = max_generation - 1;
29139         while (gen_number >= 0)
29140         {
29141             generation* gen = generation_of (gen_number);
29142             if (0 == generation_plan_allocation_start (gen))
29143             {
29144                 generation_plan_allocation_start (gen) = 
29145                     bestfit_first_pin + (max_generation - gen_number - 1) * Align (min_obj_size);
29146                 generation_plan_allocation_start_size (gen) = Align (min_obj_size);
29147                 assert (generation_plan_allocation_start (gen));
29148             }
29149             gen_number--;
29150         }
29151     }
29152
29153     uint8_t* first_address = start_address;
29154     //Look for the right pinned plug to start from.
29155     reset_pinned_queue_bos();
29156     uint8_t* planned_ephemeral_seg_end = heap_segment_plan_allocated (seg);
29157     while (!pinned_plug_que_empty_p())
29158     {
29159         mark* m = oldest_pin();
29160         if ((pinned_plug (m) >= planned_ephemeral_seg_end) && (pinned_plug (m) < end_address))
29161         {
29162             if (pinned_plug (m) < first_address)
29163             {
29164                 first_address = pinned_plug (m);
29165             }
29166             break;
29167         }
29168         else
29169             deque_pinned_plug();
29170     }
29171
29172     size_t  current_brick = brick_of (first_address);
29173     size_t  end_brick = brick_of (end_address-1);
29174     uint8_t*  last_plug = 0;
29175
29176     uint8_t* last_pinned_gap = heap_segment_plan_allocated (seg);
29177     BOOL leftp = FALSE;
29178
29179     dprintf (3, ("start addr: %Ix, first addr: %Ix, current oldest pin: %Ix",
29180         start_address, first_address, pinned_plug (oldest_pin())));
29181
29182     while (current_brick <= end_brick)
29183     {
29184         int   brick_entry =  brick_table [ current_brick ];
29185         if (brick_entry >= 0)
29186         {
29187             realloc_in_brick ((brick_address (current_brick) + brick_entry - 1),
29188                               last_plug, start_address, consing_gen,
29189                               active_new_gen_number, last_pinned_gap,
29190                               leftp);
29191         }
29192         current_brick++;
29193     }
29194
29195     if (last_plug != 0)
29196     {
29197         realloc_plug (end_address - last_plug, last_plug, consing_gen,
29198                       start_address,
29199                       active_new_gen_number, last_pinned_gap,
29200                       leftp, FALSE
29201 #ifdef SHORT_PLUGS
29202                       , NULL
29203 #endif //SHORT_PLUGS
29204                       );
29205     }
29206
29207     //Fix the old segment allocated size
29208     assert (last_pinned_gap >= heap_segment_mem (seg));
29209     assert (last_pinned_gap <= heap_segment_committed (seg));
29210     heap_segment_plan_allocated (seg) = last_pinned_gap;
29211 }
29212
29213 void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end)
29214 {
29215 #ifdef VERIFY_HEAP
29216     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
29217     {
29218         BOOL contains_pinned_plugs = FALSE;
29219         size_t mi = 0;
29220         mark* m = 0;
29221         while (mi != mark_stack_tos)
29222         {
29223             m = pinned_plug_of (mi);
29224             if ((pinned_plug (m) >= start) && (pinned_plug (m) < end))
29225             {
29226                 contains_pinned_plugs = TRUE;
29227                 break;
29228             }
29229             else
29230                 mi++;
29231         }
29232
29233         if (contains_pinned_plugs)
29234         {
29235             FATAL_GC_ERROR();
29236         }
29237     }
29238 #endif //VERIFY_HEAP
29239 }
29240
29241 void gc_heap::set_expand_in_full_gc (int condemned_gen_number)
29242 {
29243     if (!should_expand_in_full_gc)
29244     {
29245         if ((condemned_gen_number != max_generation) && 
29246             (settings.pause_mode != pause_low_latency) &&
29247             (settings.pause_mode != pause_sustained_low_latency))
29248         {
29249             should_expand_in_full_gc = TRUE;
29250         }
29251     }
29252 }
29253
29254 void gc_heap::save_ephemeral_generation_starts()
29255 {
29256     for (int ephemeral_generation = 0; ephemeral_generation < max_generation; ephemeral_generation++)
29257     {
29258         saved_ephemeral_plan_start[ephemeral_generation] = 
29259             generation_plan_allocation_start (generation_of (ephemeral_generation));
29260         saved_ephemeral_plan_start_size[ephemeral_generation] = 
29261             generation_plan_allocation_start_size (generation_of (ephemeral_generation));
29262     }
29263 }
29264
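// Expands the heap onto the segment passed in as new_heap_segment: decides whether to simply
// promote the whole ephemeral area (should_promote_ephemeral) or to commit enough of the new
// segment up front, switches ephemeral_heap_segment over, and then realloc_plugs relocates
// the surviving ephemeral plugs onto it. If the ephemeral generations end up on a reused
// segment rather than a brand new one, that entire segment is demoted.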
29265 generation* gc_heap::expand_heap (int condemned_generation,
29266                                   generation* consing_gen,
29267                                   heap_segment* new_heap_segment)
29268 {
29269     UNREFERENCED_PARAMETER(condemned_generation);
29270     assert (condemned_generation >= (max_generation -1));
29271     unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
29272     uint8_t*  start_address = generation_limit (max_generation);
29273     uint8_t*  end_address = heap_segment_allocated (ephemeral_heap_segment);
29274     BOOL should_promote_ephemeral = FALSE;
29275     ptrdiff_t eph_size = total_ephemeral_size;
29276 #ifdef BACKGROUND_GC
29277     dprintf(2,("%s: ---- Heap Expansion ----", (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")));
29278 #endif //BACKGROUND_GC
29279     settings.heap_expansion = TRUE;
29280
29281 #ifdef BACKGROUND_GC
29282     if (cm_in_progress)
29283     {
29284         if (!expanded_in_fgc)
29285         {
29286             expanded_in_fgc = TRUE;
29287         }
29288     }
29289 #endif //BACKGROUND_GC
29290
29291     //reset the elevation state for next time.
29292     dprintf (2, ("Elevation: elevation = el_none"));
29293     if (settings.should_lock_elevation && !expand_reused_seg_p())
29294         settings.should_lock_elevation = FALSE;
29295
29296     heap_segment* new_seg = new_heap_segment;
29297
29298     if (!new_seg)
29299         return consing_gen;
29300
29301     //copy the card and brick tables
29302     if (g_gc_card_table!= card_table)
29303         copy_brick_card_table();
29304
29305     BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
29306     dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p));
29307
29308     assert (generation_plan_allocation_start (generation_of (max_generation-1)));
29309     assert (generation_plan_allocation_start (generation_of (max_generation-1)) >=
29310             heap_segment_mem (ephemeral_heap_segment));
29311     assert (generation_plan_allocation_start (generation_of (max_generation-1)) <=
29312             heap_segment_committed (ephemeral_heap_segment));
29313
29314     assert (generation_plan_allocation_start (youngest_generation));
29315     assert (generation_plan_allocation_start (youngest_generation) <
29316             heap_segment_plan_allocated (ephemeral_heap_segment));
29317
29318     if (settings.pause_mode == pause_no_gc)
29319     {
29320         // We don't reuse for no gc, so the size used on the new eph seg is eph_size.
29321         if ((size_t)(heap_segment_reserved (new_seg) - heap_segment_mem (new_seg)) < (eph_size + soh_allocation_no_gc))
29322             should_promote_ephemeral = TRUE;
29323     }
29324     else
29325     {
29326         if (!use_bestfit)
29327         {
29328             should_promote_ephemeral = dt_low_ephemeral_space_p (tuning_deciding_promote_ephemeral);
29329         }
29330     }
29331
29332     if (should_promote_ephemeral)
29333     {
29334         ephemeral_promotion = TRUE;
29335         get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep);
29336         dprintf (2, ("promoting ephemeral"));
29337         save_ephemeral_generation_starts();
29338     }
29339     else
29340     {
29341         // commit the new ephemeral segment all at once if it is a new one.
29342         if ((eph_size > 0) && new_segment_p)
29343         {
29344 #ifdef FEATURE_STRUCTALIGN
29345             // The destination may require a larger alignment padding than the source.
29346             // Assume the worst possible alignment padding.
29347             eph_size += ComputeStructAlignPad(heap_segment_mem (new_seg), MAX_STRUCTALIGN, OBJECT_ALIGNMENT_OFFSET);
29348 #endif // FEATURE_STRUCTALIGN
29349 #ifdef RESPECT_LARGE_ALIGNMENT
            // Since the generation start can be larger than min_obj_size,
            // the alignment could be switched.
29352             eph_size += switch_alignment_size(FALSE);
29353 #endif //RESPECT_LARGE_ALIGNMENT
            // Since the generation start can be larger than min_obj_size,
            // compare the alignment of the first object in gen1.
29356             if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0)
29357             {
29358                 fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE);
29359                 return consing_gen;
29360             }
29361             heap_segment_used (new_seg) = heap_segment_committed (new_seg);
29362         }
29363
29364         //Fix the end of the old ephemeral heap segment
29365         heap_segment_plan_allocated (ephemeral_heap_segment) =
29366             generation_plan_allocation_start (generation_of (max_generation-1));
29367
29368         dprintf (3, ("Old ephemeral allocated set to %Ix",
29369                     (size_t)heap_segment_plan_allocated (ephemeral_heap_segment)));
29370     }
29371
29372     if (new_segment_p)
29373     {
29374         // TODO - Is this really necessary? We should think about it.
29375         //initialize the first brick
29376         size_t first_brick = brick_of (heap_segment_mem (new_seg));
29377         set_brick (first_brick,
29378                 heap_segment_mem (new_seg) - brick_address (first_brick));
29379     }
29380
29381     //From this point on, we cannot run out of memory
29382
29383     //reset the allocation of the consing generation back to the end of the
29384     //old ephemeral segment
29385     generation_allocation_limit (consing_gen) =
29386         heap_segment_plan_allocated (ephemeral_heap_segment);
29387     generation_allocation_pointer (consing_gen) = generation_allocation_limit (consing_gen);
29388     generation_allocation_segment (consing_gen) = ephemeral_heap_segment;
29389
29390     //clear the generation gap for all of the ephemeral generations
29391     {
29392         int generation_num = max_generation-1;
29393         while (generation_num >= 0)
29394         {
29395             generation* gen = generation_of (generation_num);
29396             generation_plan_allocation_start (gen) = 0;
29397             generation_num--;
29398         }
29399     }
29400
29401     heap_segment* old_seg = ephemeral_heap_segment;
29402     ephemeral_heap_segment = new_seg;
29403
29404     //Note: the ephemeral segment shouldn't be threaded onto the segment chain
29405     //because the relocation and compact phases shouldn't see it
29406
29407     // set the generation members used by allocate_in_expanded_heap
29408     // and switch to ephemeral generation
29409     consing_gen = ensure_ephemeral_heap_segment (consing_gen);
29410
29411     if (!should_promote_ephemeral)
29412     {
29413         realloc_plugs (consing_gen, old_seg, start_address, end_address,
29414                     active_new_gen_number);
29415     }
29416
29417     if (!use_bestfit)
29418     {
29419         repair_allocation_in_expanded_heap (consing_gen);
29420     }
29421
    // Assert that the generation gaps for all of the ephemeral generations were allocated.
29423 #ifdef _DEBUG
29424     {
29425         int generation_num = max_generation-1;
29426         while (generation_num >= 0)
29427         {
29428             generation* gen = generation_of (generation_num);
29429             assert (generation_plan_allocation_start (gen));
29430             generation_num--;
29431         }
29432     }
29433 #endif // _DEBUG
29434
29435     if (!new_segment_p)
29436     {
29437         dprintf (2, ("Demoting ephemeral segment"));
29438         //demote the entire segment.
29439         settings.demotion = TRUE;
29440         get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
29441         demotion_low = heap_segment_mem (ephemeral_heap_segment);
29442         demotion_high = heap_segment_reserved (ephemeral_heap_segment);
29443     }
29444     else
29445     {
29446         demotion_low = MAX_PTR;
29447         demotion_high = 0;
29448 #ifndef MULTIPLE_HEAPS
29449         settings.demotion = FALSE;
29450         get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
29451 #endif //!MULTIPLE_HEAPS
29452     }
29453     ptrdiff_t eph_size1 = total_ephemeral_size;
29454     MAYBE_UNUSED_VAR(eph_size1);
29455
29456     if (!should_promote_ephemeral && new_segment_p)
29457     {
29458         assert (eph_size1 <= eph_size);
29459     }
29460
29461     if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg))
29462     {
        // This is to catch when we accidentally delete a segment that has pins.
29464         verify_no_pins (heap_segment_mem (old_seg), heap_segment_reserved (old_seg));
29465     }
29466
29467     verify_no_pins (heap_segment_plan_allocated (old_seg), heap_segment_reserved(old_seg));
29468
29469     dprintf(2,("---- End of Heap Expansion ----"));
29470     return consing_gen;
29471 }
29472
29473 void gc_heap::set_static_data()
29474 {
29475     static_data* pause_mode_sdata = static_data_table[latency_level];
29476     for (int i = 0; i < NUMBERGENERATIONS; i++)
29477     {
29478         dynamic_data* dd = dynamic_data_of (i);
29479         static_data* sdata = &pause_mode_sdata[i];
29480
29481         dd->sdata = sdata;
29482         dd->min_size = sdata->min_size;
29483
        dprintf (GTC_LOG, ("PM: %d - min: %Id, max: %Id, fr_l: %Id, fr_b: %d%%",
            settings.pause_mode,
            dd->min_size, dd_max_size (dd),
            sdata->fragmentation_limit, (int)(sdata->fragmentation_burden_limit * 100)));
29488     }
29489 }
29490
29491 // Initialize the values that are not const.
29492 void gc_heap::init_static_data()
29493 {
29494     size_t gen0size = GCHeap::GetValidGen0MaxSize(get_valid_segment_size());
29495     size_t gen0_min_size = Align(gen0size / 8 * 5);
29496
29497     size_t gen0_max_size =
29498 #ifdef MULTIPLE_HEAPS
29499         max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024));
29500 #else //MULTIPLE_HEAPS
29501         (gc_can_use_concurrent ?
29502             6*1024*1024 :
29503             max (6*1024*1024,  min ( Align(soh_segment_size/2), 200*1024*1024)));
29504 #endif //MULTIPLE_HEAPS
29505
29506     // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap.
29507     size_t gen1_max_size = 
29508 #ifdef MULTIPLE_HEAPS
29509         max (6*1024*1024, Align(soh_segment_size/2));
29510 #else //MULTIPLE_HEAPS
29511         (gc_can_use_concurrent ?
29512             6*1024*1024 :
29513             max (6*1024*1024, Align(soh_segment_size/2)));
29514 #endif //MULTIPLE_HEAPS
29515
29516     dprintf (GTC_LOG, ("gen0size: %Id, gen0 min: %Id, max: %Id, gen1 max: %Id",
29517         gen0size, gen0_min_size, gen0_max_size, gen1_max_size));
29518
29519     for (int i = latency_level_first; i <= latency_level_last; i++)
29520     {
29521         static_data_table[i][0].min_size = gen0_min_size;
29522         static_data_table[i][0].max_size = gen0_max_size;
29523         static_data_table[i][1].max_size = gen1_max_size;
29524     }
29525 }
29526
29527 bool gc_heap::init_dynamic_data()
29528 {
29529     qpf = GCToOSInterface::QueryPerformanceFrequency();
29530
29531     uint32_t now = (uint32_t)GetHighPrecisionTimeStamp();
29532
29533     set_static_data();
29534
29535     for (int i = 0; i <= max_generation+1; i++)
29536     {
29537         dynamic_data* dd = dynamic_data_of (i);
29538         dd->gc_clock = 0;
29539         dd->time_clock = now;
29540         dd->current_size = 0;
29541         dd->promoted_size = 0;
29542         dd->collection_count = 0;
29543         dd->new_allocation = dd->min_size;
29544         dd->gc_new_allocation = dd->new_allocation;
29545         dd->desired_allocation = dd->new_allocation;
29546         dd->fragmentation = 0;
29547     }
29548
29549 #ifdef GC_CONFIG_DRIVEN
29550     if (heap_number == 0)
29551         time_init = now;
29552 #endif //GC_CONFIG_DRIVEN
29553
29554     return true;
29555 }
29556
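// Maps a survival ratio (cst) to a growth factor for the generation budget:
//   f = limit * (1 - cst) / (1 - cst * limit), capped at max_limit.
// Illustrative numbers only (the real limit/max_limit come from the static data): with
// limit = 2.0 and max_limit = 7.0, cst = 0.3 gives f = 1.4 / 0.4 = 3.5; the cap kicks in at
// cst = (max_limit - limit) / (limit * (max_limit - 1)) ~= 0.42, where the formula itself
// reaches max_limit, so the transition is continuous.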
29557 float gc_heap::surv_to_growth (float cst, float limit, float max_limit)
29558 {
29559     if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f))))
29560         return ((limit - limit*cst) / (1.0f - (cst * limit)));
29561     else
29562         return max_limit;
29563 }
29564
29565
29566 //if the allocation budget wasn't exhausted, the new budget may be wrong because the survival may 
29567 //not be correct (collection happened too soon). Correct with a linear estimation based on the previous 
29568 //value of the budget 
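//(Illustrative, made-up numbers: if only half of the budget was used, i.e. allocation_fraction
//is 0.5, a freshly computed budget of 100MB and a previous desired budget of 60MB blend to
//0.5*100 + 0.5*60 = 80MB.)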
29569 static size_t linear_allocation_model (float allocation_fraction, size_t new_allocation, 
29570                                        size_t previous_desired_allocation, size_t collection_count)
29571 {
29572     if ((allocation_fraction < 0.95) && (allocation_fraction > 0.0))
29573     {
        // Print the fraction as a percentage.
        dprintf (2, ("allocation fraction: %d", (int)(allocation_fraction*100.0)));
29575         new_allocation = (size_t)(allocation_fraction*new_allocation + (1.0-allocation_fraction)*previous_desired_allocation);
29576     }
29577 #if 0 
29578     size_t smoothing = 3; // exponential smoothing factor
29579     if (smoothing  > collection_count)
29580         smoothing  = collection_count;
29581     new_allocation = new_allocation / smoothing + ((previous_desired_allocation / smoothing) * (smoothing-1));
29582 #else
29583     UNREFERENCED_PARAMETER(collection_count);
29584 #endif //0
29585     return new_allocation;
29586 }
29587
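// Computes the next allocation budget for a generation from how much survived. For gen2 the
// generation is grown by surv_to_growth's factor and the budget is reduced further when
// fragmentation is high; the LOH budget also takes available physical memory into account to
// avoid OOM; for the ephemeral generations the budget is roughly f * survivors. Results are
// blended with the previous budget via linear_allocation_model and kept within the
// generation's static min/max sizes.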
29588 size_t gc_heap::desired_new_allocation (dynamic_data* dd,
29589                                         size_t out, int gen_number,
29590                                         int pass)
29591 {
29592     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
29593
29594     if (dd_begin_data_size (dd) == 0)
29595     {
29596         size_t new_allocation = dd_min_size (dd);
29597         current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation;        
29598         return new_allocation;
29599     }
29600     else
29601     {
29602         float     cst;
29603         size_t    previous_desired_allocation = dd_desired_allocation (dd);
29604         size_t    current_size = dd_current_size (dd);
29605         float     max_limit = dd_max_limit (dd);
29606         float     limit = dd_limit (dd);
29607         size_t    min_gc_size = dd_min_size (dd);
29608         float     f = 0;
29609         size_t    max_size = dd_max_size (dd);
29610         size_t    new_allocation = 0;
29611         float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd));
29612         if (gen_number >= max_generation)
29613         {
29614             size_t    new_size = 0;
29615
29616             cst = min (1.0f, float (out) / float (dd_begin_data_size (dd)));
29617
29618             f = surv_to_growth (cst, limit, max_limit);
29619             size_t max_growth_size = (size_t)(max_size / f);
29620             if (current_size >= max_growth_size)
29621             {
29622                 new_size = max_size;
29623             }
29624             else
29625             {
29626                 new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size);
29627             }
29628
29629             assert ((new_size >= current_size) || (new_size == max_size));
29630
29631             if (gen_number == max_generation)
29632             {
29633                 new_allocation  =  max((new_size - current_size), min_gc_size);
29634
29635                 new_allocation = linear_allocation_model (allocation_fraction, new_allocation, 
29636                                                           dd_desired_allocation (dd), dd_collection_count (dd));
29637
29638                 if ((dd_fragmentation (dd) > ((size_t)((f-1)*current_size))))
29639                 {
29640                     //reducing allocation in case of fragmentation
29641                     size_t new_allocation1 = max (min_gc_size,
29642                                                   // CAN OVERFLOW
29643                                                   (size_t)((float)new_allocation * current_size /
29644                                                            ((float)current_size + 2*dd_fragmentation (dd))));
29645                     dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id",
29646                                  new_allocation, new_allocation1));
29647                     new_allocation = new_allocation1;
29648                 }
29649             }
29650             else //large object heap
29651             {
29652                 uint32_t memory_load = 0;
29653                 uint64_t available_physical = 0;
29654                 get_memory_info (&memory_load, &available_physical);
29655                 if (heap_number == 0)
29656                     settings.exit_memory_load = memory_load;
29657                 if (available_physical > 1024*1024)
29658                     available_physical -= 1024*1024;
29659
29660                 uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number));
29661                 if (available_free > (uint64_t)MAX_PTR)
29662                 {
29663                     available_free = (uint64_t)MAX_PTR;
29664                 }
29665
29666                 //try to avoid OOM during large object allocation
29667                 new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))), 
29668                                           (size_t)available_free), 
29669                                       max ((current_size/4), min_gc_size));
29670
29671                 new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
29672                                                           dd_desired_allocation (dd), dd_collection_count (dd));
29673
29674             }
29675         }
29676         else
29677         {
29678             size_t survivors = out;
29679             cst = float (survivors) / float (dd_begin_data_size (dd));
29680             f = surv_to_growth (cst, limit, max_limit);
29681             new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size);
29682
29683             new_allocation = linear_allocation_model (allocation_fraction, new_allocation, 
29684                                                       dd_desired_allocation (dd), dd_collection_count (dd));
29685
29686             if (gen_number == 0)
29687             {
29688                 if (pass == 0)
29689                 {
29690
29691                     //printf ("%f, %Id\n", cst, new_allocation);
29692                     size_t free_space = generation_free_list_space (generation_of (gen_number));
29693                     // DTREVIEW - is min_gc_size really a good choice? 
29694                     // on 64-bit this will almost always be true.
29695                     dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size));
29696                     if (free_space > min_gc_size)
29697                     {
29698                         settings.gen0_reduction_count = 2;
29699                     }
29700                     else
29701                     {
29702                         if (settings.gen0_reduction_count > 0)
29703                             settings.gen0_reduction_count--;
29704                     }
29705                 }
29706                 if (settings.gen0_reduction_count > 0)
29707                 {
29708                     dprintf (2, ("Reducing new allocation based on fragmentation"));
29709                     new_allocation = min (new_allocation,
29710                                           max (min_gc_size, (max_size/3)));
29711                 }
29712             }
29713         }
29714
29715         size_t new_allocation_ret = 
29716             Align (new_allocation, get_alignment_constant (!(gen_number == (max_generation+1))));
29717         int gen_data_index = gen_number;
29718         gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]);
29719         gen_data->new_allocation = new_allocation_ret;
29720
29721         dd_surv (dd) = cst;
29722
29723 #ifdef SIMPLE_DPRINTF
29724         dprintf (1, ("h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id",
29725                      heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)),
29726                      (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
29727 #else
29728         dprintf (1,("gen: %d in: %Id out: %Id ", gen_number, generation_allocation_size (generation_of (gen_number)), out));
29729         dprintf (1,("current: %Id alloc: %Id ", current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd))));
29730         dprintf (1,(" surv: %d%% f: %d%% new-size: %Id new-alloc: %Id",
29731                     (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
29732 #endif //SIMPLE_DPRINTF
29733
29734         return new_allocation_ret;
29735     }
29736 }
29737
29738 //returns the planned size of a generation (including free list element)
29739 size_t gc_heap::generation_plan_size (int gen_number)
29740 {
29741     if (0 == gen_number)
29742         return max((heap_segment_plan_allocated (ephemeral_heap_segment) -
29743                     generation_plan_allocation_start (generation_of (gen_number))),
29744                    (int)Align (min_obj_size));
29745     else
29746     {
29747         generation* gen = generation_of (gen_number);
29748         if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
29749             return (generation_plan_allocation_start (generation_of (gen_number - 1)) -
29750                     generation_plan_allocation_start (generation_of (gen_number)));
29751         else
29752         {
29753             size_t gensize = 0;
29754             heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
29755
29756             PREFIX_ASSUME(seg != NULL);
29757
29758             while (seg && (seg != ephemeral_heap_segment))
29759             {
29760                 gensize += heap_segment_plan_allocated (seg) -
29761                            heap_segment_mem (seg);
29762                 seg = heap_segment_next_rw (seg);
29763             }
29764             if (seg)
29765             {
29766                 gensize += (generation_plan_allocation_start (generation_of (gen_number - 1)) -
29767                             heap_segment_mem (ephemeral_heap_segment));
29768             }
29769             return gensize;
29770         }
29771     }
29772
29773 }
29774
29775 //returns the size of a generation (including free list element)
29776 size_t gc_heap::generation_size (int gen_number)
29777 {
29778     if (0 == gen_number)
29779         return max((heap_segment_allocated (ephemeral_heap_segment) -
29780                     generation_allocation_start (generation_of (gen_number))),
29781                    (int)Align (min_obj_size));
29782     else
29783     {
29784         generation* gen = generation_of (gen_number);
29785         if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
29786             return (generation_allocation_start (generation_of (gen_number - 1)) -
29787                     generation_allocation_start (generation_of (gen_number)));
29788         else
29789         {
29790             size_t gensize = 0;
29791             heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
29792
29793             PREFIX_ASSUME(seg != NULL);
29794
29795             while (seg && (seg != ephemeral_heap_segment))
29796             {
29797                 gensize += heap_segment_allocated (seg) -
29798                            heap_segment_mem (seg);
29799                 seg = heap_segment_next_rw (seg);
29800             }
29801             if (seg)
29802             {
29803                 gensize += (generation_allocation_start (generation_of (gen_number - 1)) -
29804                             heap_segment_mem (ephemeral_heap_segment));
29805             }
29806
29807             return gensize;
29808         }
29809     }
29810
29811 }
29812
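// "in" for a generation is the number of bytes that were allocated into it during this GC
// (generation_allocation_size), i.e. what was promoted from the younger generation; it is
// charged against the generation's remaining budget. When an ephemeral promotion happened,
// everything that survived is treated as flowing into gen2 instead.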
29813 size_t  gc_heap::compute_in (int gen_number)
29814 {
29815     assert (gen_number != 0);
29816     dynamic_data* dd = dynamic_data_of (gen_number);
29817
29818     size_t in = generation_allocation_size (generation_of (gen_number));
29819
29820     if (gen_number == max_generation && ephemeral_promotion)
29821     {
29822         in = 0;
29823         for (int i = 0; i <= max_generation; i++)
29824         {
29825             dynamic_data* dd = dynamic_data_of (i);
29826             in += dd_survived_size (dd);
29827             if (i != max_generation)
29828             {
29829                 generation_condemned_allocated (generation_of (gen_number)) += dd_survived_size (dd);
29830             }
29831         }
29832     }
29833
29834     dd_gc_new_allocation (dd) -= in;
29835     dd_new_allocation (dd) = dd_gc_new_allocation (dd);
29836
29837     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
29838     gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
29839     gen_data->in = in;
29840
29841     generation_allocation_size (generation_of (gen_number)) = 0;
29842     return in;
29843 }
29844
29845 void  gc_heap::compute_promoted_allocation (int gen_number)
29846 {
29847     compute_in (gen_number);
29848 }
29849
29850 #ifdef BIT64
29851 inline
29852 size_t gc_heap::trim_youngest_desired (uint32_t memory_load,
29853                                        size_t total_new_allocation,
29854                                        size_t total_min_allocation)
29855 {
29856     if (memory_load < MAX_ALLOWED_MEM_LOAD)
29857     {
29858         // If the total of memory load and gen0 budget exceeds 
29859         // our max memory load limit, trim the gen0 budget so the total 
29860         // is the max memory load limit.
29861         size_t remain_memory_load = (MAX_ALLOWED_MEM_LOAD - memory_load) * mem_one_percent;
29862         return min (total_new_allocation, remain_memory_load);
29863     }
29864     else
29865     {
29866         return max (mem_one_percent, total_min_allocation);
29867     }
29868 }
29869
29870 size_t gc_heap::joined_youngest_desired (size_t new_allocation)
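// Looks at the process-wide gen0 budget (new_allocation times the number of heaps) and, when
// the memory load is already high or the total budget is large, trims it with
// trim_youngest_desired so that the budget plus the current memory load stays under
// MAX_ALLOWED_MEM_LOAD; the result is then divided back per heap and capped at gen0's max.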
29871 {
29872     dprintf (2, ("Entry memory load: %d; gen0 new_alloc: %Id", settings.entry_memory_load, new_allocation));
29873
29874     size_t final_new_allocation = new_allocation;
29875     if (new_allocation > MIN_YOUNGEST_GEN_DESIRED)
29876     {
29877         uint32_t num_heaps = 1;
29878
29879 #ifdef MULTIPLE_HEAPS
29880         num_heaps = gc_heap::n_heaps;
29881 #endif //MULTIPLE_HEAPS
29882
29883         size_t total_new_allocation = new_allocation * num_heaps;
29884         size_t total_min_allocation = MIN_YOUNGEST_GEN_DESIRED * num_heaps;
29885
29886         if ((settings.entry_memory_load >= MAX_ALLOWED_MEM_LOAD) ||
29887             (total_new_allocation > max (youngest_gen_desired_th, total_min_allocation)))
29888         {
29889             uint32_t memory_load = 0;
29890             get_memory_info (&memory_load);
29891             settings.exit_memory_load = memory_load;
            dprintf (2, ("Current memory load: %d", memory_load));
29893
29894             size_t final_total = 
29895                 trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation);
29896             size_t max_new_allocation = 
29897 #ifdef MULTIPLE_HEAPS
29898                                          dd_max_size (g_heaps[0]->dynamic_data_of (0));
29899 #else //MULTIPLE_HEAPS
29900                                          dd_max_size (dynamic_data_of (0));
29901 #endif //MULTIPLE_HEAPS
29902
29903             final_new_allocation  = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation);
29904         }
29905     }
29906
29907     if (final_new_allocation < new_allocation)
29908     {
29909         settings.gen0_reduction_count = 2;
29910     }
29911
29912     return final_new_allocation;
29913 }
29914 #endif // BIT64 
29915
29916 inline
29917 gc_history_per_heap* gc_heap::get_gc_data_per_heap()
29918 {
29919 #ifdef BACKGROUND_GC
29920     return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap);
29921 #else
29922     return &gc_data_per_heap;
29923 #endif //BACKGROUND_GC
29924 }
29925
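// Refreshes the per-generation bookkeeping after a GC: fragmentation and current size, the
// survived/promoted counters, and the next allocation budget via desired_new_allocation (with
// special handling for gen0 noise and for low latency mode). When gen2 is being processed,
// the LOH dynamic data is refreshed here as well.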
29926 void gc_heap::compute_new_dynamic_data (int gen_number)
29927 {
29928     PREFIX_ASSUME(gen_number >= 0);
29929     PREFIX_ASSUME(gen_number <= max_generation);
29930
29931     dynamic_data* dd = dynamic_data_of (gen_number);
29932     generation*   gen = generation_of (gen_number);
29933     size_t        in = (gen_number==0) ? 0 : compute_in (gen_number);
29934
29935     size_t total_gen_size = generation_size (gen_number);
29936     //keep track of fragmentation
29937     dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);
29938     dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
29939
29940     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
29941
29942     size_t out = dd_survived_size (dd);
29943
29944     gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
29945     gen_data->size_after = total_gen_size;
29946     gen_data->free_list_space_after = generation_free_list_space (gen);
29947     gen_data->free_obj_space_after = generation_free_obj_space (gen);
29948
29949     if ((settings.pause_mode == pause_low_latency) && (gen_number <= 1))
29950     {
        // When we are in the low latency mode, we can still be
        // condemning more than gen1 because of induced GCs.
29953         dd_desired_allocation (dd) = low_latency_alloc;
29954     }
29955     else
29956     {
29957         if (gen_number == 0)
29958         {
29959             //compensate for dead finalizable objects promotion.
            //they shouldn't be counted for growth.
29961             size_t final_promoted = 0;
29962             final_promoted = min (promoted_bytes (heap_number), out);
29963             // Prefast: this is clear from above but prefast needs to be told explicitly
29964             PREFIX_ASSUME(final_promoted <= out);
29965
29966             dprintf (2, ("gen: %d final promoted: %Id", gen_number, final_promoted));
29967             dd_freach_previous_promotion (dd) = final_promoted;
29968             size_t lower_bound = desired_new_allocation  (dd, out-final_promoted, gen_number, 0);
29969
29970             if (settings.condemned_generation == 0)
29971             {
29972                 //there is no noise.
29973                 dd_desired_allocation (dd) = lower_bound;
29974             }
29975             else
29976             {
29977                 size_t higher_bound = desired_new_allocation (dd, out, gen_number, 1);
29978
29979                 // <TODO>This assert was causing AppDomains\unload\test1n\test1nrun.bat to fail</TODO>
29980                 //assert ( lower_bound <= higher_bound);
29981
29982                 //discount the noise. Change the desired allocation
29983                 //only if the previous value is outside of the range.
29984                 if (dd_desired_allocation (dd) < lower_bound)
29985                 {
29986                     dd_desired_allocation (dd) = lower_bound;
29987                 }
29988                 else if (dd_desired_allocation (dd) > higher_bound)
29989                 {
29990                     dd_desired_allocation (dd) = higher_bound;
29991                 }
29992 #if defined (BIT64) && !defined (MULTIPLE_HEAPS)
29993                 dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd));
29994 #endif // BIT64 && !MULTIPLE_HEAPS
29995                 trim_youngest_desired_low_memory();
29996                 dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd)));
29997             }
29998         }
29999         else
30000         {
30001             dd_desired_allocation (dd) = desired_new_allocation (dd, out, gen_number, 0);
30002         }
30003     }
30004
30005     gen_data->pinned_surv = dd_pinned_survived_size (dd);
30006     gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd);
30007
30008     dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
30009     dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30010
30011     //update counter
30012     dd_promoted_size (dd) = out;
30013     if (gen_number == max_generation)
30014     {
30015         dd = dynamic_data_of (max_generation+1);
30016         total_gen_size = generation_size (max_generation + 1);
30017         dd_fragmentation (dd) = generation_free_list_space (large_object_generation) + 
30018                                 generation_free_obj_space (large_object_generation);
30019         dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
30020         dd_survived_size (dd) = dd_current_size (dd);
30021         in = 0;
30022         out = dd_current_size (dd);
30023         dd_desired_allocation (dd) = desired_new_allocation (dd, out, max_generation+1, 0);
30024         dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd),
30025                                            get_alignment_constant (FALSE));
30026         dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30027
30028         gen_data = &(current_gc_data_per_heap->gen_data[max_generation+1]);
30029         gen_data->size_after = total_gen_size;
30030         gen_data->free_list_space_after = generation_free_list_space (large_object_generation);
30031         gen_data->free_obj_space_after = generation_free_obj_space (large_object_generation);
30032         gen_data->npinned_surv = out;
30033 #ifdef BACKGROUND_GC
30034         end_loh_size = total_gen_size;
30035 #endif //BACKGROUND_GC
30036         //update counter
30037         dd_promoted_size (dd) = out;
30038     }
30039 }
30040
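// Under system-wide low memory, caps the gen0 budget at roughly a tenth of the memory this
// heap currently has committed for gen2 and the LOH (but never below gen0's minimum size).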
30041 void gc_heap::trim_youngest_desired_low_memory()
30042 {
30043     if (g_low_memory_status)
30044     {
30045         size_t committed_mem = 0;
30046         heap_segment* seg = generation_start_segment (generation_of (max_generation));
30047         while (seg)
30048         {
30049             committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
30050             seg = heap_segment_next (seg);
30051         }
30052         seg = generation_start_segment (generation_of (max_generation + 1));
30053         while (seg)
30054         {
30055             committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
30056             seg = heap_segment_next (seg);
30057         }
30058
30059         dynamic_data* dd = dynamic_data_of (0);
30060         size_t current = dd_desired_allocation (dd);
30061         size_t candidate = max (Align ((committed_mem / 10), get_alignment_constant(FALSE)), dd_min_size (dd));
30062
30063         dd_desired_allocation (dd) = min (current, candidate);
30064     }
30065 }
30066
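// After a blocking GC, trims the committed-but-unused tail of the ephemeral segment. On a
// single heap build, the slack kept committed is bounded by the recent gen0 budget once the
// decommit timeout elapses; when gen1 or gen2 was condemned an additional cap applies. Pages
// beyond the kept slack are returned to the OS via decommit_heap_segment_pages.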
30067 void gc_heap::decommit_ephemeral_segment_pages()
30068 {
30069     if (settings.concurrent)
30070     {
30071         return;
30072     }
30073
30074     size_t slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
30075     dynamic_data* dd = dynamic_data_of (0);
30076
30077 #ifndef MULTIPLE_HEAPS
30078     size_t extra_space = (g_low_memory_status ? 0 : (512 * 1024));
30079     size_t decommit_timeout = (g_low_memory_status ? 0 : GC_EPHEMERAL_DECOMMIT_TIMEOUT);
30080     size_t ephemeral_elapsed = dd_time_clock(dd) - gc_last_ephemeral_decommit_time;
30081
30082     if (dd_desired_allocation (dd) > gc_gen0_desired_high)
30083     {
30084         gc_gen0_desired_high = dd_desired_allocation (dd) + extra_space;
30085     }
30086
30087     if (ephemeral_elapsed >= decommit_timeout)
30088     {
30089         slack_space = min (slack_space, gc_gen0_desired_high);
30090
30091         gc_last_ephemeral_decommit_time = dd_time_clock(dd);
30092         gc_gen0_desired_high = 0;
30093     }
30094 #endif //!MULTIPLE_HEAPS
30095
30096     if (settings.condemned_generation >= (max_generation-1))
30097     {
30098         size_t new_slack_space = 
30099 #ifdef BIT64
30100                     max(min(min(soh_segment_size/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd));
30101 #else
30102 #ifdef FEATURE_CORECLR
30103                     dd_desired_allocation (dd);
30104 #else
30105                     dd_max_size (dd);
30106 #endif //FEATURE_CORECLR                                    
30107 #endif // BIT64
30108
30109         slack_space = min (slack_space, new_slack_space);
30110     }
30111
30112     decommit_heap_segment_pages (ephemeral_heap_segment, slack_space);    
30113
30114     gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30115     current_gc_data_per_heap->extra_gen0_committed = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
30116 }
30117
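// Hands the allocator its next allocation quantum: the larger of the requested size and the
// generation's remaining budget (dd_new_allocation), capped by the free space actually
// available; whatever is handed out is subtracted from the remaining budget.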
30118 size_t gc_heap::new_allocation_limit (size_t size, size_t free_size, int gen_number)
30119 {
30120     dynamic_data* dd        = dynamic_data_of (gen_number);
30121     ptrdiff_t           new_alloc = dd_new_allocation (dd);
30122     assert (new_alloc == (ptrdiff_t)Align (new_alloc,
30123                                            get_alignment_constant (!(gen_number == (max_generation+1)))));
30124     size_t        limit     = min (max (new_alloc, (ptrdiff_t)size), (ptrdiff_t)free_size);
30125     assert (limit == Align (limit, get_alignment_constant (!(gen_number == (max_generation+1)))));
30126     dd_new_allocation (dd) = (new_alloc - limit );
30127     return limit;
30128 }
30129
30130 //This is meant to be called by decide_on_compacting.
30131
30132 size_t gc_heap::generation_fragmentation (generation* gen,
30133                                           generation* consing_gen,
30134                                           uint8_t* end)
30135 {
30136     size_t frag;
30137     uint8_t* alloc = generation_allocation_pointer (consing_gen);
    // If the allocation pointer has reached the ephemeral segment,
    // the fragmentation is measured from it; otherwise the whole
    // ephemeral segment is considered fragmentation.
    if (in_range_for_segment (alloc, ephemeral_heap_segment))
    {
        if (alloc <= heap_segment_allocated(ephemeral_heap_segment))
            frag = end - alloc;
        else
        {
            // case when no survivors, allocated set to beginning
            frag = 0;
        }
        dprintf (3, ("ephemeral frag: %Id", frag));
    }
    else
        frag = (heap_segment_allocated (ephemeral_heap_segment) -
                heap_segment_mem (ephemeral_heap_segment));
30155     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30156
30157     PREFIX_ASSUME(seg != NULL);
30158
30159     while (seg != ephemeral_heap_segment)
30160     {
30161         frag += (heap_segment_allocated (seg) -
30162                  heap_segment_plan_allocated (seg));
30163         dprintf (3, ("seg: %Ix, frag: %Id", (size_t)seg,
30164                      (heap_segment_allocated (seg) -
30165                       heap_segment_plan_allocated (seg))));
30166
30167         seg = heap_segment_next_rw (seg);
30168         assert (seg);
30169     }
30170     dprintf (3, ("frag: %Id discounting pinned plugs", frag));
30171     //add the length of the dequeued plug free space
30172     size_t bos = 0;
30173     while (bos < mark_stack_bos)
30174     {
30175         frag += (pinned_len (pinned_plug_of (bos)));
30176         bos++;
30177     }
30178
30179     return frag;
30180 }
30181
30182 // for SOH this returns the total sizes of the generation and its 
30183 // younger generation(s).
30184 // for LOH this returns just LOH size.
30185 size_t gc_heap::generation_sizes (generation* gen)
30186 {
30187     size_t result = 0;
30188     if (generation_start_segment (gen) == ephemeral_heap_segment)
30189         result = (heap_segment_allocated (ephemeral_heap_segment) -
30190                   generation_allocation_start (gen));
30191     else
30192     {
30193         heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
30194
30195         PREFIX_ASSUME(seg != NULL);
30196
30197         while (seg)
30198         {
30199             result += (heap_segment_allocated (seg) -
30200                        heap_segment_mem (seg));
30201             seg = heap_segment_next_in_range (seg);
30202         }
30203     }
30204
30205     return result;
30206 }
30207
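      // Decides whether this GC compacts or sweeps. Compaction is chosen when it is
      // forced by config, when this is the last GC before a potential OOM, for
      // induced compacting GCs, when ephemeral space is low, when the planned
      // fragmentation exceeds the generation's limits, under (very) high physical
      // memory load on 64-bit, when we cannot even commit the generation start gaps,
      // or when we are in a no-GC region. should_expand is set when a compacting GC
      // would still not leave enough ephemeral space, ie a new ephemeral segment is
      // needed.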
30208 BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
30209                                     size_t fragmentation,
30210                                     BOOL& should_expand)
30211 {
30212     BOOL should_compact = FALSE;
30213     should_expand = FALSE;
30214     generation*   gen = generation_of (condemned_gen_number);
30215     dynamic_data* dd = dynamic_data_of (condemned_gen_number);
30216     size_t gen_sizes     = generation_sizes(gen);
30217     float  fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) :
30218                                     (float (fragmentation) / gen_sizes) );
30219
30220     dprintf (GTC_LOG, ("fragmentation: %Id (%d%%)", fragmentation, (int)(fragmentation_burden * 100.0)));
30221
30222 #ifdef STRESS_HEAP
30223     // for pure GC stress runs we need compaction, for GC stress "mix"
30224     // we need to ensure a better mix of compacting and sweeping collections
30225     if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent
30226         && !g_pConfig->IsGCStressMix())
30227         should_compact = TRUE;
30228
30229 #ifdef GC_STATS
30230     // in GC stress "mix" mode, for stress induced collections make sure we 
30231     // keep sweeps and compactions relatively balanced. do not (yet) force sweeps
30232     // against the GC's determination, as it may lead to premature OOMs.
30233     if (g_pConfig->IsGCStressMix() && settings.stress_induced)
30234     {
30235         int compactions = g_GCStatistics.cntCompactFGC+g_GCStatistics.cntCompactNGC;
30236         int sweeps = g_GCStatistics.cntFGC + g_GCStatistics.cntNGC - compactions;
30237         if (compactions < sweeps / 10)
30238         {
30239             should_compact = TRUE;
30240         }
30241     }
30242 #endif // GC_STATS
30243 #endif //STRESS_HEAP
30244
30245     if (GCConfig::GetForceCompact())
30246         should_compact = TRUE;
30247
30248     if ((condemned_gen_number == max_generation) && last_gc_before_oom)
30249     {
30250         should_compact = TRUE;
30251         last_gc_before_oom = FALSE;
30252         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc);
30253     }
30254
30255     if (settings.reason == reason_induced_compacting)
30256     {
30257         dprintf (2, ("induced compacting GC"));
30258         should_compact = TRUE;
30259         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting);
30260     }
30261
30262     dprintf (2, ("Fragmentation: %Id Fragmentation burden %d%%",
30263                 fragmentation, (int) (100*fragmentation_burden)));
30264
30265     if (!should_compact)
30266     {
30267         if (dt_low_ephemeral_space_p (tuning_deciding_compaction))
30268         {
30269             dprintf(GTC_LOG, ("compacting due to low ephemeral"));
30270             should_compact = TRUE;
30271             get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral);
30272         }
30273     }
30274
30275     if (should_compact)
30276     {
30277         if ((condemned_gen_number >= (max_generation - 1)))
30278         {
30279             if (dt_low_ephemeral_space_p (tuning_deciding_expansion))
30280             {
30281                 dprintf (GTC_LOG,("Not enough space for all ephemeral generations with compaction"));
30282                 should_expand = TRUE;
30283             }
30284         }
30285     }
30286
30287 #ifdef BIT64
30288     BOOL high_memory = FALSE;
30289 #endif // BIT64
30290
30291     if (!should_compact)
30292     {
30293         // We are not putting this in dt_high_frag_p because it's not exactly
30294         // high fragmentation - it's just enough planned fragmentation for us to 
30295         // want to compact. Also the "fragmentation" we are talking about here
30296         // is different from anywhere else.
30297         BOOL frag_exceeded = ((fragmentation >= dd_fragmentation_limit (dd)) &&
30298                                 (fragmentation_burden >= dd_fragmentation_burden_limit (dd)));
30299
30300         if (frag_exceeded)
30301         {
30302 #ifdef BACKGROUND_GC
30303             // do not force compaction if this was a stress-induced GC
30304             IN_STRESS_HEAP(if (!settings.stress_induced))
30305             {
30306 #endif // BACKGROUND_GC
30307             assert (settings.concurrent == FALSE);
30308             should_compact = TRUE;
30309             get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag);
30310 #ifdef BACKGROUND_GC
30311             }
30312 #endif // BACKGROUND_GC
30313         }
30314
30315 #ifdef BIT64
30316         // check for high memory situation
30317         if(!should_compact)
30318         {
30319             uint32_t num_heaps = 1;
30320 #ifdef MULTIPLE_HEAPS
30321             num_heaps = gc_heap::n_heaps;
30322 #endif // MULTIPLE_HEAPS
30323             
30324             ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation);
30325             if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th))
30326             {
30327                 if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (entry_available_physical_mem, num_heaps)))
30328                 {
30329                     dprintf(GTC_LOG,("compacting due to fragmentation in high memory"));
30330                     should_compact = TRUE;
30331                     get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag);
30332                 }
30333                 high_memory = TRUE;
30334             }
30335             else if(settings.entry_memory_load >= v_high_memory_load_th)
30336             {
30337                 if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps)))
30338                 {
30339                     dprintf(GTC_LOG,("compacting due to fragmentation in very high memory"));
30340                     should_compact = TRUE;
30341                     get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag);
30342                 }
30343                 high_memory = TRUE;
30344             }
30345         }
30346 #endif // BIT64
30347     }
30348
30349     // The purpose of calling ensure_gap_allocation here is to make sure
30350     // that we actually are able to commit the memory to allocate generation
30351     // starts.
30352     if ((should_compact == FALSE) &&
30353         (ensure_gap_allocation (condemned_gen_number) == FALSE))
30354     {
30355         should_compact = TRUE;
30356         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps);
30357     }
30358
30359     if (settings.condemned_generation == max_generation)
30360     {
30361         //check the progress
30362         if (
30363 #ifdef BIT64
30364             (high_memory && !should_compact) ||
30365 #endif // BIT64
30366             (generation_plan_allocation_start (generation_of (max_generation - 1)) >= 
30367                 generation_allocation_start (generation_of (max_generation - 1))))
30368         {
30369             dprintf (2, (" Elevation: gen2 size: %Id, gen2 plan size: %Id, no progress, elevation = locked",
30370                      generation_size (max_generation),
30371                      generation_plan_size (max_generation)));
30372             //no progress -> lock
30373             settings.should_lock_elevation = TRUE;
30374         }
30375     }
30376
30377     if (settings.pause_mode == pause_no_gc)
30378     {
30379         should_compact = TRUE;
30380         if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_plan_allocated (ephemeral_heap_segment))
30381             < soh_allocation_no_gc)
30382         {
30383             should_expand = TRUE;
30384         }
30385     }
30386
30387     dprintf (2, ("will %s", (should_compact ? "compact" : "sweep")));
30388     return should_compact;
30389 }
30390
30391 size_t align_lower_good_size_allocation (size_t size)
30392 {
30393     return (size/64)*64;
30394 }
30395
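      // A rough estimate of how much gen0 allocation we would like to be able to
      // satisfy right after a GC; used when judging whether the ephemeral
      // generations still fit in the ephemeral segment.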
30396 size_t gc_heap::approximate_new_allocation()
30397 {
30398     dynamic_data* dd0 = dynamic_data_of (0);
30399     return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3));
30400 }
30401
30402 // After a GC we expect to have at least this
30403 // much space at the end of the segment to satisfy
30404 // a reasonable number of allocation requests.
30405 size_t gc_heap::end_space_after_gc()
30406 {
30407     return max ((dd_min_size (dynamic_data_of (0))/2), (END_SPACE_AFTER_GC + Align (min_obj_size)));
30408 }
30409
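      // Returns TRUE if the ephemeral generations are expected to fit in the
      // ephemeral segment for the given tuning point. For the expansion decision we
      // also count the free space in front of pinned plugs past the planned gen0
      // start; for the other tuning points we only look at the space left between
      // the relevant allocated pointer and the end of the segment.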
30410 BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
30411 {
30412     uint8_t* start = 0;
30413     
30414     if ((tp == tuning_deciding_condemned_gen) ||
30415         (tp == tuning_deciding_compaction))
30416     {
30417         start = (settings.concurrent ? alloc_allocated : heap_segment_allocated (ephemeral_heap_segment));
30418         if (settings.concurrent)
30419         {
30420             dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (alloc_allocated)", 
30421                 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
30422         }
30423         else
30424         {
30425             dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (allocated)", 
30426                 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment))));
30427         }
30428     }
30429     else if (tp == tuning_deciding_expansion)
30430     {
30431         start = heap_segment_plan_allocated (ephemeral_heap_segment);
30432         dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment based on plan", 
30433             (size_t)(heap_segment_reserved (ephemeral_heap_segment) - start)));
30434     }
30435     else
30436     {
30437         assert (tp == tuning_deciding_full_gc);
30438         dprintf (GTC_LOG, ("FGC: %Id left at the end of ephemeral segment (alloc_allocated)", 
30439             (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
30440         start = alloc_allocated;
30441     }
30442     
30443     if (start == 0) // empty ephemeral generations
30444     {
30445         assert (tp == tuning_deciding_expansion);
30446         // if there are no survivors in the ephemeral segment, 
30447         // this should be the beginning of ephemeral segment.
30448         start = generation_allocation_pointer (generation_of (max_generation));
30449         assert (start == heap_segment_mem (ephemeral_heap_segment));
30450     }
30451
30452     if (tp == tuning_deciding_expansion)
30453     {
30454         assert (settings.condemned_generation >= (max_generation-1));
30455         size_t gen0size = approximate_new_allocation();
30456         size_t eph_size = gen0size;
30457
30458         for (int j = 1; j <= max_generation-1; j++)
30459         {
30460             eph_size += 2*dd_min_size (dynamic_data_of(j));
30461         }
30462         
30463         // We must find room for one large object and enough room for gen0size
30464         if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > eph_size)
30465         {
30466             dprintf (3, ("Enough room before end of segment"));
30467             return TRUE;
30468         }
30469         else
30470         {
30471             size_t room = align_lower_good_size_allocation
30472                 (heap_segment_reserved (ephemeral_heap_segment) - start);
30473             size_t end_seg = room;
30474
30475             //look at the plug free space
30476             size_t largest_alloc = END_SPACE_AFTER_GC + Align (min_obj_size);
30477             bool large_chunk_found = false;
30478             size_t bos = 0;
30479             uint8_t* gen0start = generation_plan_allocation_start (youngest_generation);
30480             dprintf (3, ("ephemeral_gen_fit_p: gen0 plan start: %Ix", (size_t)gen0start));
30481             if (gen0start == 0)
30482                 return FALSE;
30483             dprintf (3, ("ephemeral_gen_fit_p: room before free list search %Id, needed: %Id",
30484                          room, gen0size));
30485             while ((bos < mark_stack_bos) &&
30486                    !((room >= gen0size) && large_chunk_found))
30487             {
30488                 uint8_t* plug = pinned_plug (pinned_plug_of (bos));
30489                 if (in_range_for_segment (plug, ephemeral_heap_segment))
30490                 {
30491                     if (plug >= gen0start)
30492                     {
30493                         size_t chunk = align_lower_good_size_allocation (pinned_len (pinned_plug_of (bos)));
30494                         room += chunk;
30495                         if (!large_chunk_found)
30496                         {
30497                             large_chunk_found = (chunk >= largest_alloc);
30498                         }
30499                         dprintf (3, ("ephemeral_gen_fit_p: room now %Id, large chunk: %Id",
30500                                      room, large_chunk_found));
30501                     }
30502                 }
30503                 bos++;
30504             }
30505
30506             if (room >= gen0size)
30507             {
30508                 if (large_chunk_found)
30509                 {
30510                     dprintf (3, ("Enough room"));
30511                     return TRUE;
30512                 }
30513                 else
30514                 {
30515                     // now we need to find largest_alloc at the end of the segment.
30516                     if (end_seg >= end_space_after_gc())
30517                     {
30518                         dprintf (3, ("Enough room (may need end of seg)"));
30519                         return TRUE;
30520                     }
30521                 }
30522             }
30523
30524             dprintf (3, ("Not enough room"));
30525             return FALSE;
30526         }
30527     }
30528     else
30529     {
30530         size_t end_space = 0;
30531         dynamic_data* dd = dynamic_data_of (0);
30532         if ((tp == tuning_deciding_condemned_gen) ||
30533             (tp == tuning_deciding_full_gc))
30534         {
30535             end_space = 2*dd_min_size (dd);
30536         }
30537         else
30538         {
30539             assert (tp == tuning_deciding_compaction);
30540             end_space = approximate_new_allocation();
30541         }
30542
30543         if (!((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > end_space))
30544         {
30545             dprintf (GTC_LOG, ("ephemeral_gen_fit_p: does not fit without compaction"));
30546         }
30547         return ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > end_space);
30548     }
30549 }
30550
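      // Allocates a large object straight out of the large object generation using a
      // temporary alloc context (gen3's shared context can't be used here). During a
      // background GC the object's mark array bit is cleared, and if the marking
      // phase is still in progress it is set again so the concurrently allocated
      // object is treated as live.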
30551 CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_bytes)
30552 {
30553     //create a new alloc context because gen3context is shared.
30554     alloc_context acontext;
30555     acontext.alloc_ptr = 0;
30556     acontext.alloc_limit = 0;
30557     acontext.alloc_bytes = 0;
30558 #ifdef MULTIPLE_HEAPS
30559     acontext.set_alloc_heap(vm_heap);
30560 #endif //MULTIPLE_HEAPS
30561
30562 #ifdef MARK_ARRAY
30563     uint8_t* current_lowest_address = lowest_address;
30564     uint8_t* current_highest_address = highest_address;
30565 #ifdef BACKGROUND_GC
30566     if (recursive_gc_sync::background_running_p())
30567     {
30568         current_lowest_address = background_saved_lowest_address;
30569         current_highest_address = background_saved_highest_address;
30570     }
30571 #endif //BACKGROUND_GC
30572 #endif // MARK_ARRAY
30573
30574 #ifdef BIT64
30575     size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size));
30576 #else
30577     size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size));
30578 #endif // BIT64
30579
30580     if (jsize >= maxObjectSize)
30581     {
30582         if (GCConfig::GetBreakOnOOM())
30583         {
30584             GCToOSInterface::DebugBreak();
30585         }
30586         return NULL;
30587     }
30588
30589     size_t size = AlignQword (jsize);
30590     int align_const = get_alignment_constant (FALSE);
30591 #ifdef FEATURE_LOH_COMPACTION
30592     size_t pad = Align (loh_padding_obj_size, align_const);
30593 #else
30594     size_t pad = 0;
30595 #endif //FEATURE_LOH_COMPACTION
30596
30597     assert (size >= Align (min_obj_size, align_const));
30598 #ifdef _MSC_VER
30599 #pragma inline_depth(0)
30600 #endif //_MSC_VER
30601     if (! allocate_more_space (&acontext, (size + pad), max_generation+1))
30602     {
30603         return 0;
30604     }
30605
30606 #ifdef _MSC_VER
30607 #pragma inline_depth(20)
30608 #endif //_MSC_VER
30609
30610 #ifdef FEATURE_LOH_COMPACTION
30611     // The GC allocator made a free object already in this alloc context and
30612     // adjusted the alloc_ptr accordingly.
30613 #endif //FEATURE_LOH_COMPACTION
30614
30615     uint8_t*  result = acontext.alloc_ptr;
30616
30617     assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size);
30618     alloc_bytes += size;
30619
30620     CObjectHeader* obj = (CObjectHeader*)result;
30621
30622 #ifdef MARK_ARRAY
30623     if (recursive_gc_sync::background_running_p())
30624     {
30625         if ((result < current_highest_address) && (result >= current_lowest_address))
30626         {
30627             dprintf (3, ("Clearing mark bit at address %Ix",
30628                      (size_t)(&mark_array [mark_word_of (result)])));
30629
30630             mark_array_clear_marked (result);
30631         }
30632 #ifdef BACKGROUND_GC
30633         //the object has to cover one full mark uint32_t
30634         assert (size > mark_word_size);
30635         if (current_c_gc_state == c_gc_state_marking)
30636         {
30637             dprintf (3, ("Concurrent allocation of a large object %Ix",
30638                         (size_t)obj));
30639             //mark the new block specially so we know it is a new object
30640             if ((result < current_highest_address) && (result >= current_lowest_address))
30641             {
30642                 dprintf (3, ("Setting mark bit at address %Ix",
30643                             (size_t)(&mark_array [mark_word_of (result)])));
30644     
30645                 mark_array_set_marked (result);
30646             }
30647         }
30648 #endif //BACKGROUND_GC
30649     }
30650 #endif //MARK_ARRAY
30651
30652     assert (obj != 0);
30653     assert ((size_t)obj == Align ((size_t)obj, align_const));
30654
30655     return obj;
30656 }
30657
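      // Lets the OS discard the backing store for the body of a large free region
      // (via VirtualReset). The beginning of the free object that must stay intact
      // is skipped and the range is page aligned.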
30658 void reset_memory (uint8_t* o, size_t sizeo)
30659 {
30660     if (sizeo > 128 * 1024)
30661     {
30662         // We cannot reset the memory for the useful part of a free object.
30663         size_t size_to_skip = min_free_list - plug_skew;
30664
30665         size_t page_start = align_on_page ((size_t)(o + size_to_skip));
30666         size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start;
30667         // Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail
30668         // on write watched memory.
30669         if (reset_mm_p)
30670         {
30671 #ifdef MULTIPLE_HEAPS
30672             bool unlock_p = true;
30673 #else
30674             // We don't do unlock because there could be many processes using workstation GC and it's
30675             // bad perf to have many threads doing unlock at the same time.
30676             bool unlock_p = false;
30677 #endif // MULTIPLE_HEAPS
30678
30679             reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, unlock_p);
30680         }
30681     }
30682 }
30683
30684 void gc_heap::reset_large_object (uint8_t* o)
30685 {
30686     // If it's a large object, allow the O/S to discard the backing store for these pages.
30687     reset_memory (o, size(o));
30688 }
30689
30690 BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
30691 {
30692     BOOL m = FALSE;
30693     // It shouldn't be necessary to do these comparisons because this is only used for blocking
30694     // GCs and LOH segments cannot be out of range.
30695     if ((o >= lowest_address) && (o < highest_address))
30696     {
30697         if (marked (o))
30698         {
30699             if (clearp)
30700             {
30701                 clear_marked (o);
30702                 if (pinned (o))
30703                     clear_pinned(o);
30704             }
30705             m = TRUE;
30706         }
30707         else
30708             m = FALSE;
30709     }
30710     else
30711         m = TRUE;
30712     return m;
30713 }
30714
30715 void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn)
30716 {
30717     // Now walk the portion of memory that is actually being relocated.
30718     walk_relocation (profiling_context, fn);
30719
30720 #ifdef FEATURE_LOH_COMPACTION
30721     if (loh_compacted_p)
30722     {
30723         walk_relocation_for_loh (profiling_context, fn);
30724     }
30725 #endif //FEATURE_LOH_COMPACTION
30726 }
30727
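      // Walks the large object generation and reports each plug (a run of
      // consecutively marked objects) to the profiling callback.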
30728 void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn)
30729 {
30730     generation* gen        = large_object_generation;
30731     heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
30732
30733     PREFIX_ASSUME(seg != NULL);
30734
30735     uint8_t* o                = generation_allocation_start (gen);
30736     uint8_t* plug_end         = o;
30737     uint8_t* plug_start       = o;
30738
30739     while (1)
30740     {
30741         if (o >= heap_segment_allocated (seg))
30742         {
30743             seg = heap_segment_next (seg);
30744             if (seg == 0)
30745                 break;
30746             else
30747                 o = heap_segment_mem (seg);
30748         }
30749         if (large_object_marked(o, FALSE))
30750         {
30751             plug_start = o;
30752
30753             BOOL m = TRUE;
30754             while (m)
30755             {
30756                 o = o + AlignQword (size (o));
30757                 if (o >= heap_segment_allocated (seg))
30758                 {
30759                     break;
30760                 }
30761                 m = large_object_marked (o, FALSE);
30762             }
30763
30764             plug_end = o;
30765
30766             fn (plug_start, plug_end, 0, profiling_context, false, false);
30767         }
30768         else
30769         {
30770             while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
30771             {
30772                 o = o + AlignQword (size (o));
30773             }
30774         }
30775     }
30776 }
30777
30778 #ifdef BACKGROUND_GC
30779
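      // Returns whether o is marked in the background mark array; objects outside the
      // address range saved at the start of the background GC are treated as marked.
      // When clearp is TRUE the mark bit is cleared as a side effect.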
30780 BOOL gc_heap::background_object_marked (uint8_t* o, BOOL clearp)
30781 {
30782     BOOL m = FALSE;
30783     if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
30784     {
30785         if (mark_array_marked (o))
30786         {
30787             if (clearp)
30788             {
30789                 mark_array_clear_marked (o);
30790                 //dprintf (3, ("mark array bit for object %Ix is cleared", o));
30791                 dprintf (3, ("CM: %Ix", o));
30792             }
30793             m = TRUE;
30794         }
30795         else
30796             m = FALSE;
30797     }
30798     else
30799         m = TRUE;
30800
30801     dprintf (3, ("o %Ix(%d) %s", o, size(o), (m ? "was bm" : "was NOT bm")));
30802     return m;
30803 }
30804
30805 uint8_t* gc_heap::background_next_end (heap_segment* seg, BOOL large_objects_p)
30806 {
30807     return
30808         (large_objects_p ? heap_segment_allocated (seg) : heap_segment_background_allocated (seg));
30809 }
30810
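      // When heap verification is on, fills [start, end[ with the byte pattern b so
      // that stale uses of trimmed or deleted memory are easier to notice.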
30811 void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b)
30812 {
30813 #ifdef VERIFY_HEAP
30814     if (end > start)
30815     {
30816         if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
30817            !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL))
30818         {
30819             dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end));
30820             memset (start, b, (end - start));
30821         }
30822     }
30823 #endif //VERIFY_HEAP
30824 }
30825
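      // Unlinks an empty segment that the background sweep decided to delete and
      // queues it on the freeable segment list; the segment is decommitted here and
      // actually released later. Note that the SOH sweep walks segments from the
      // ephemeral segment backwards, so in that case 'next_seg' is the segment
      // before 'seg' in the list, which is why the SOH branch relinks next_seg.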
30826 void gc_heap::generation_delete_heap_segment (generation* gen, 
30827                                               heap_segment* seg,
30828                                               heap_segment* prev_seg,
30829                                               heap_segment* next_seg)
30830 {
30831     dprintf (3, ("bgc sweep: deleting seg %Ix", seg));
30832     if (gen == large_object_generation)
30833     {
30834         heap_segment_next (prev_seg) = next_seg;
30835
30836         dprintf (3, ("Preparing empty large segment %Ix for deletion", (size_t)seg));
30837
30838         heap_segment_next (seg) = freeable_large_heap_segment;
30839         freeable_large_heap_segment = seg;
30840     }
30841     else
30842     {
30843         if (seg == ephemeral_heap_segment)
30844         {
30845             FATAL_GC_ERROR();
30846         }
30847
30848         heap_segment_next (next_seg) = prev_seg;
30849
30850         dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg));
30851         heap_segment_next (seg) = freeable_small_heap_segment;
30852         freeable_small_heap_segment = seg;
30853     }
30854
30855     decommit_heap_segment (seg);
30856     seg->flags |= heap_segment_flags_decommitted;
30857
30858     set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
30859 }
30860
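      // Called by the background sweep when it reaches the end of a segment. If
      // objects were allocated past background_allocated while the BGC was running,
      // the gap in front of them becomes a free object and the bricks are fixed up;
      // otherwise the segment is trimmed to the last plug, or flagged for deletion
      // when it is completely empty and not the segment we started sweeping from.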
30861 void gc_heap::process_background_segment_end (heap_segment* seg, 
30862                                           generation* gen,
30863                                           uint8_t* last_plug_end,
30864                                           heap_segment* start_seg,
30865                                           BOOL* delete_p)
30866 {
30867     *delete_p = FALSE;
30868     uint8_t* allocated = heap_segment_allocated (seg);
30869     uint8_t* background_allocated = heap_segment_background_allocated (seg);
30870
30871     dprintf (3, ("Processing end of background segment [%Ix, %Ix[(%Ix[)", 
30872                 (size_t)heap_segment_mem (seg), background_allocated, allocated));
30873
30874
30875     if (allocated != background_allocated)
30876     {
30877         if (gen == large_object_generation)
30878         {
30879             FATAL_GC_ERROR();
30880         }
30881
30882         dprintf (3, ("Make a free object before newly promoted objects [%Ix, %Ix[", 
30883                     (size_t)last_plug_end, background_allocated));
30884         thread_gap (last_plug_end, background_allocated - last_plug_end, generation_of (max_generation));
30885
30886         fix_brick_to_highest (last_plug_end, background_allocated);
30887
30888         // Since we allowed FGCs while going through the gaps, we could have erased the brick
30889         // that corresponds to bgc_allocated because we had to update the brick there;
30890         // recover it here.
30891         fix_brick_to_highest (background_allocated, background_allocated);
30892     }
30893     else
30894     {
30895         // by default, if allocated == background_allocated, it can't
30896         // be the ephemeral segment.
30897         if (seg == ephemeral_heap_segment)
30898         {
30899             FATAL_GC_ERROR();
30900         }
30901
30902         if (allocated == heap_segment_mem (seg))
30903         {
30904             // this can happen with LOH segments when multiple threads
30905             // allocate new segments and not all of them were needed to
30906             // satisfy allocation requests.
30907             assert (gen == large_object_generation);
30908         }
30909
30910         if (last_plug_end == heap_segment_mem (seg))
30911         {
30912             if (seg != start_seg)
30913             {
30914                 *delete_p = TRUE;
30915             }
30916
30917             dprintf (3, ("Segment allocated is %Ix (beginning of this seg) - %s be deleted",
30918                         (size_t)allocated, (*delete_p ? "should" : "should not")));
30919         }
30920         else
30921         {
30922             dprintf (3, ("Trimming seg to %Ix[", (size_t)last_plug_end));
30923             heap_segment_allocated (seg) = last_plug_end;
30924             set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
30925
30926             decommit_heap_segment_pages (seg, 0);
30927         }
30928     }
30929
30930     dprintf (3, ("verifying seg %Ix's mark array was completely cleared", seg));
30931     bgc_verify_mark_array_cleared (seg);
30932 }
30933
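      // Processes SOH segments that the background mark phase never saw (for example
      // segments acquired while the BGC was in progress): segments with nothing
      // allocated in them are deleted, everything else is left alone.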
30934 void gc_heap::process_n_background_segments (heap_segment* seg, 
30935                                              heap_segment* prev_seg,
30936                                              generation* gen)
30937 {
30938     assert (gen != large_object_generation);
30939
30940     while (seg)
30941     {
30942         dprintf (2, ("processing seg %Ix (not seen by bgc mark)", seg));
30943         heap_segment* next_seg = heap_segment_next (seg);
30944
30945         if (heap_segment_read_only_p (seg))
30946         {
30947             prev_seg = seg;
30948         }
30949         else
30950         {
30951             if (heap_segment_allocated (seg) == heap_segment_mem (seg))
30952             {
30953                 // This can happen - if we have a LOH segment where nothing survived,
30954                 // or a SOH segment allocated by a gen1 GC while a BGC was in progress
30955                 // where nothing survived the last gen1 GC we did.
30956                 generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
30957             }
30958             else
30959             {
30960                 prev_seg = seg;
30961             }
30962         }
30963
30964         verify_soh_segment_list();
30965         seg = next_seg;
30966     }
30967 }
30968
30969 inline
30970 BOOL gc_heap::fgc_should_consider_object (uint8_t* o,
30971                                           heap_segment* seg,
30972                                           BOOL consider_bgc_mark_p, 
30973                                           BOOL check_current_sweep_p, 
30974                                           BOOL check_saved_sweep_p)
30975 {
30976     // the logic for this function must be kept in sync with the analogous function
30977     // in ToolBox\SOS\Strike\gc.cpp
30978
30979     // TRUE means we don't need to check the bgc mark bit
30980     // FALSE means we do.
30981     BOOL no_bgc_mark_p = FALSE;
30982
30983     if (consider_bgc_mark_p)
30984     {
30985         if (check_current_sweep_p && (o < current_sweep_pos))
30986         {
30987             dprintf (3, ("no bgc mark - o: %Ix < cs: %Ix", o, current_sweep_pos));
30988             no_bgc_mark_p = TRUE;
30989         }
30990
30991         if (!no_bgc_mark_p)
30992         {
30993             if(check_saved_sweep_p && (o >= saved_sweep_ephemeral_start))
30994             {
30995                 dprintf (3, ("no bgc mark - o: %Ix >= ss: %Ix", o, saved_sweep_ephemeral_start));
30996                 no_bgc_mark_p = TRUE;
30997             }
30998
30999             if (!check_saved_sweep_p)
31000             {
31001                 uint8_t* background_allocated = heap_segment_background_allocated (seg);
31002                 // if this was the saved ephemeral segment, check_saved_sweep_p 
31003                 // would've been true.
31004                 assert (heap_segment_background_allocated (seg) != saved_sweep_ephemeral_start);
31005                 // background_allocated could be 0 for the new segments acquired during bgc
31006                 // sweep and we still want no_bgc_mark_p to be true.
31007                 if (o >= background_allocated)
31008                 {
31009                     dprintf (3, ("no bgc mark - o: %Ix >= ba: %Ix", o, background_allocated));
31010                     no_bgc_mark_p = TRUE;
31011                 }
31012             }
31013         }
31014     }
31015     else
31016     {
31017         no_bgc_mark_p = TRUE;
31018     }
31019
31020     dprintf (3, ("bgc mark %Ix: %s (bm: %s)", o, (no_bgc_mark_p ? "no" : "yes"), (background_object_marked (o, FALSE) ? "yes" : "no")));
31021     return (no_bgc_mark_p ? TRUE : background_object_marked (o, FALSE));
31022 }
31023
31024 // consider_bgc_mark_p tells you if you need to care about the bgc mark bit at all
31025 // if it's TRUE, check_current_sweep_p tells you if you should consider the
31026 // current sweep position or not.
31027 void gc_heap::should_check_bgc_mark (heap_segment* seg, 
31028                                      BOOL* consider_bgc_mark_p, 
31029                                      BOOL* check_current_sweep_p,
31030                                      BOOL* check_saved_sweep_p)
31031 {
31032     // the logic for this function must be kept in sync with the analogous function
31033     // in ToolBox\SOS\Strike\gc.cpp
31034     *consider_bgc_mark_p = FALSE;
31035     *check_current_sweep_p = FALSE;
31036     *check_saved_sweep_p = FALSE;
31037
31038     if (current_c_gc_state == c_gc_state_planning)
31039     {
31040         // We are doing the current_sweep_pos comparison here because we have yet to 
31041         // turn on the swept flag for the segment but in_range_for_segment will return
31042         // FALSE if the address is the same as reserved.
31043         if ((seg->flags & heap_segment_flags_swept) || (current_sweep_pos == heap_segment_reserved (seg)))
31044         {
31045             dprintf (3, ("seg %Ix is already swept by bgc", seg));
31046         }
31047         else
31048         {
31049             *consider_bgc_mark_p = TRUE;
31050
31051             dprintf (3, ("seg %Ix hasn't been swept by bgc", seg));
31052
31053             if (seg == saved_sweep_ephemeral_seg)
31054             {
31055                 dprintf (3, ("seg %Ix is the saved ephemeral seg", seg));
31056                 *check_saved_sweep_p = TRUE;
31057             }
31058
31059             if (in_range_for_segment (current_sweep_pos, seg))
31060             {
31061                 dprintf (3, ("current sweep pos is %Ix and within seg %Ix", 
31062                               current_sweep_pos, seg));
31063                 *check_current_sweep_p = TRUE;
31064             }
31065         }
31066     }
31067 }
31068
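      // Sweeps the ephemeral generations in place using the background mark array:
      // unmarked ranges between plugs become free space. Gaps in gen1 are threaded
      // onto its free list directly; gen0's free items are accumulated on a local
      // list and only published at the end, so concurrent gen0 allocation is not
      // disturbed while we thread the list.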
31069 void gc_heap::background_ephemeral_sweep()
31070 {
31071     dprintf (3, ("bgc ephemeral sweep"));
31072
31073     int align_const = get_alignment_constant (TRUE);
31074
31075     saved_sweep_ephemeral_seg = ephemeral_heap_segment;
31076     saved_sweep_ephemeral_start = generation_allocation_start (generation_of (max_generation - 1));
31077
31078     // Since we don't want to interfere with gen0 allocation while we are threading gen0 free list,
31079     // we thread onto a list first then publish it when we are done.
31080     allocator youngest_free_list;
31081     size_t youngest_free_list_space = 0;
31082     size_t youngest_free_obj_space = 0;
31083
31084     youngest_free_list.clear();
31085
31086     for (int i = 0; i <= (max_generation - 1); i++)
31087     {
31088         generation* gen_to_reset = generation_of (i);
31089         assert (generation_free_list_space (gen_to_reset) == 0);
31090         // Can only assert free_list_space is 0, not free_obj_space as the allocator could have added 
31091         // something there.
31092     }
31093
31094     for (int i = (max_generation - 1); i >= 0; i--)
31095     {
31096         generation* current_gen = generation_of (i);
31097         uint8_t* o = generation_allocation_start (current_gen);
31098         //Skip the generation gap object
31099         o = o + Align(size (o), align_const);
31100         uint8_t* end = ((i > 0) ?
31101                      generation_allocation_start (generation_of (i - 1)) : 
31102                      heap_segment_allocated (ephemeral_heap_segment));
31103
31104         uint8_t* plug_end = o;
31105         uint8_t* plug_start = o;
31106         BOOL marked_p = FALSE;
31107
31108         while (o < end)
31109         {
31110             marked_p = background_object_marked (o, TRUE);
31111             if (marked_p)
31112             {
31113                 plug_start = o;
31114                 size_t plug_size = plug_start - plug_end;
31115
31116                 if (i >= 1)
31117                 {
31118                     thread_gap (plug_end, plug_size, current_gen);
31119                 }
31120                 else
31121                 {
31122                     if (plug_size > 0)
31123                     {
31124                         make_unused_array (plug_end, plug_size);
31125                         if (plug_size >= min_free_list)
31126                         {
31127                             youngest_free_list_space += plug_size;
31128                             youngest_free_list.thread_item (plug_end, plug_size);
31129                         }
31130                         else
31131                         {
31132                             youngest_free_obj_space += plug_size;
31133                         }
31134                     }
31135                 }
31136
31137                 fix_brick_to_highest (plug_end, plug_start);
31138                 fix_brick_to_highest (plug_start, plug_start);
31139
31140                 BOOL m = TRUE;
31141                 while (m)
31142                 {
31143                     o = o + Align (size (o), align_const);
31144                     if (o >= end)
31145                     {
31146                         break;
31147                     }
31148
31149                     m = background_object_marked (o, TRUE);
31150                 }
31151                 plug_end = o;
31152                 dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
31153             }
31154             else
31155             {
31156                 while ((o < end) && !background_object_marked (o, FALSE))
31157                 {
31158                     o = o + Align (size (o), align_const);
31159                 }
31160             }
31161         }
31162
31163         if (plug_end != end)
31164         {
31165             if (i >= 1)
31166             {
31167                 thread_gap (plug_end, end - plug_end, current_gen);
31168                 fix_brick_to_highest (plug_end, end);
31169             }
31170             else
31171             {
31172                 heap_segment_allocated (ephemeral_heap_segment) = plug_end;
31173                 // the following line is temporary.
31174                 heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end;
31175 #ifdef VERIFY_HEAP
31176                 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
31177                 {
31178                     make_unused_array (plug_end, (end - plug_end));
31179                 }
31180 #endif //VERIFY_HEAP
31181             }
31182         }
31183
31184         dd_fragmentation (dynamic_data_of (i)) = 
31185             generation_free_list_space (current_gen) + generation_free_obj_space (current_gen);
31186     }
31187
31188     generation* youngest_gen = generation_of (0);
31189     generation_free_list_space (youngest_gen) = youngest_free_list_space;
31190     generation_free_obj_space (youngest_gen) = youngest_free_obj_space;
31191     dd_fragmentation (dynamic_data_of (0)) = youngest_free_list_space + youngest_free_obj_space;
31192     generation_allocator (youngest_gen)->copy_with_no_repair (&youngest_free_list);
31193 }
31194
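      // The background sweep phase. Resets the generations' allocator state, restarts
      // the EE, sweeps the ephemeral segment first, then walks the SOH segments (from
      // the ephemeral segment backwards) and finally the LOH segments, threading the
      // gaps between marked plugs onto the free lists, trimming or deleting empty
      // segments, and periodically calling allow_fgc() so foreground GCs can run
      // while the sweep is in progress.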
31195 void gc_heap::background_sweep()
31196 {
31197     generation* gen         = generation_of (max_generation);
31198     dynamic_data* dd        = dynamic_data_of (max_generation);
31199     // For SOH segments we go backwards.
31200     heap_segment* start_seg = ephemeral_heap_segment;
31201     PREFIX_ASSUME(start_seg != NULL);
31202     heap_segment* fseg      = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
31203     heap_segment* seg       = start_seg;
31204     uint8_t* o                 = heap_segment_mem (seg);
31205
31206     heap_segment* prev_seg = heap_segment_next (seg);
31207     int align_const        = get_alignment_constant (TRUE);
31208     if (seg == fseg)
31209     {
31210         assert (o == generation_allocation_start (generation_of (max_generation)));
31211         o = o + Align(size (o), align_const);
31212     }
31213
31214     uint8_t* plug_end      = o;
31215     uint8_t* plug_start    = o;
31216     next_sweep_obj         = o;
31217     current_sweep_pos      = o;
31218
31219     //uint8_t* end              = background_next_end (seg, (gen == large_object_generation));
31220     uint8_t* end              = heap_segment_background_allocated (seg);
31221     BOOL delete_p          = FALSE;
31222
31223     //concurrent_print_time_delta ("finished with mark and start with sweep");
31224     concurrent_print_time_delta ("Sw");
31225     dprintf (2, ("---- (GC%d)Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
31226
31227     //block concurrent allocation for large objects
31228     dprintf (3, ("lh state: planning"));
31229     if (gc_lh_block_event.IsValid())
31230     {
31231         gc_lh_block_event.Reset();
31232     }
31233
31234     for (int i = 0; i <= (max_generation + 1); i++)
31235     {
31236         generation* gen_to_reset = generation_of (i);
31237         generation_allocator (gen_to_reset)->clear();
31238         generation_free_list_space (gen_to_reset) = 0;
31239         generation_free_obj_space (gen_to_reset) = 0;
31240         generation_free_list_allocated (gen_to_reset) = 0;
31241         generation_end_seg_allocated (gen_to_reset) = 0;
31242         generation_condemned_allocated (gen_to_reset) = 0; 
31243         //reset the allocation so foreground gc can allocate into older generation
31244         generation_allocation_pointer (gen_to_reset) = 0;
31245         generation_allocation_limit (gen_to_reset) = 0;
31246         generation_allocation_segment (gen_to_reset) = heap_segment_rw (generation_start_segment (gen_to_reset));
31247     }
31248
31249     FIRE_EVENT(BGC2ndNonConEnd);
31250
31251     current_bgc_state = bgc_sweep_soh;
31252     verify_soh_segment_list();
31253
31254 #ifdef FEATURE_BASICFREEZE
31255     if ((generation_start_segment (gen) != ephemeral_heap_segment) &&
31256         ro_segments_in_range)
31257     {
31258         sweep_ro_segments (generation_start_segment (gen));
31259     }
31260 #endif // FEATURE_BASICFREEZE
31261
31262     //TODO BACKGROUND_GC: can we move this to where we switch to the LOH?
31263     if (current_c_gc_state != c_gc_state_planning)
31264     {
31265         current_c_gc_state = c_gc_state_planning;
31266     }
31267
31268     concurrent_print_time_delta ("Swe");
31269
31270     heap_segment* loh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation + 1)));
31271     PREFIX_ASSUME(loh_seg != NULL);
31272     while (loh_seg)
31273     {
31274         loh_seg->flags &= ~heap_segment_flags_swept;
31275         heap_segment_background_allocated (loh_seg) = heap_segment_allocated (loh_seg);
31276         loh_seg = heap_segment_next_rw (loh_seg);
31277     }
31278
31279 #ifdef MULTIPLE_HEAPS
31280     bgc_t_join.join(this, gc_join_restart_ee);
31281     if (bgc_t_join.joined())
31282 #endif //MULTIPLE_HEAPS 
31283     {
31284 #ifdef MULTIPLE_HEAPS
31285         dprintf(2, ("Starting BGC threads for resuming EE"));
31286         bgc_t_join.restart();
31287 #endif //MULTIPLE_HEAPS
31288     }
31289
31290     if (heap_number == 0)
31291     {
31292         restart_EE ();
31293     }
31294
31295     FIRE_EVENT(BGC2ndConBegin);
31296
31297     background_ephemeral_sweep();
31298
31299 #ifdef MULTIPLE_HEAPS
31300     bgc_t_join.join(this, gc_join_after_ephemeral_sweep);
31301     if (bgc_t_join.joined())
31302 #endif //MULTIPLE_HEAPS
31303     {
31304 #ifdef FEATURE_EVENT_TRACE
31305         bgc_heap_walk_for_etw_p = GCEventStatus::IsEnabled(GCEventProvider_Default, 
31306                                                            GCEventKeyword_GCHeapSurvivalAndMovement, 
31307                                                            GCEventLevel_Information);
31308 #endif //FEATURE_EVENT_TRACE
31309
31310         leave_spin_lock (&gc_lock);
31311
31312 #ifdef MULTIPLE_HEAPS
31313         dprintf(2, ("Starting BGC threads for BGC sweeping"));
31314         bgc_t_join.restart();
31315 #endif //MULTIPLE_HEAPS
31316     }
31317
31318     disable_preemptive (true);
31319
31320     dprintf (2, ("bgs: sweeping gen2 objects"));
31321     dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31322                     (size_t)heap_segment_mem (seg),
31323                     (size_t)heap_segment_allocated (seg),
31324                     (size_t)heap_segment_background_allocated (seg)));
31325
31326     int num_objs = 256;
31327     int current_num_objs = 0;
31328     heap_segment* next_seg = 0;
31329
31330     while (1)
31331     {
31332         if (o >= end)
31333         {
31334             if (gen == large_object_generation)
31335             {
31336                 next_seg = heap_segment_next (seg);
31337             }
31338             else
31339             {
31340                 next_seg = heap_segment_prev (fseg, seg);
31341             }
31342
31343             delete_p = FALSE;
31344
31345             if (!heap_segment_read_only_p (seg))
31346             {
31347                 if (gen == large_object_generation)
31348                 {
31349                     // we can treat all LOH segments as being in the bgc domain,
31350                     // regardless of whether we saw them during bgc mark or not,
31351                     // because we don't allow LOH allocations during bgc
31352                     // sweep anyway - the LOH segments can't change.
31353                     process_background_segment_end (seg, gen, plug_end, 
31354                                                     start_seg, &delete_p);
31355                 }
31356                 else
31357                 {
31358                     assert (heap_segment_background_allocated (seg) != 0);
31359                     process_background_segment_end (seg, gen, plug_end, 
31360                                                     start_seg, &delete_p);
31361
31362                     assert (next_seg || !delete_p);
31363                 }
31364             }
31365
31366             if (delete_p)
31367             {
31368                 generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
31369             }
31370             else
31371             {
31372                 prev_seg = seg;
31373                 dprintf (2, ("seg %Ix has been swept", seg));
31374                 seg->flags |= heap_segment_flags_swept;
31375             }
31376
31377             verify_soh_segment_list();
31378
31379             seg = next_seg;
31380
31381             dprintf (GTC_LOG, ("seg: %Ix, next_seg: %Ix, prev_seg: %Ix", seg, next_seg, prev_seg));
31382             
31383             if (seg == 0)
31384             {
31385                 generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
31386
31387                 PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
31388
31389                 if (gen != large_object_generation)
31390                 {
31391                     dprintf (2, ("bgs: sweeping gen3 objects"));
31392                     current_bgc_state = bgc_sweep_loh;
31393                     gen = generation_of (max_generation+1);
31394                     start_seg = heap_segment_rw (generation_start_segment (gen));
31395
31396                     PREFIX_ASSUME(start_seg != NULL);
31397
31398                     seg = start_seg;
31399                     prev_seg = 0;
31400                     o = generation_allocation_start (gen);
31401                     assert (method_table (o) == g_gc_pFreeObjectMethodTable);
31402                     align_const = get_alignment_constant (FALSE);
31403                     o = o + Align(size (o), align_const);
31404                     plug_end = o;
31405                     end = heap_segment_allocated (seg);
31406                     dprintf (2, ("sweeping gen3 objects"));
31407                     generation_free_obj_space (gen) = 0;
31408                     generation_allocator (gen)->clear();
31409                     generation_free_list_space (gen) = 0;
31410
31411                     dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31412                                     (size_t)heap_segment_mem (seg),
31413                                     (size_t)heap_segment_allocated (seg),
31414                                     (size_t)heap_segment_background_allocated (seg)));
31415                 }
31416                 else
31417                     break;
31418             }
31419             else
31420             {
31421                 o = heap_segment_mem (seg);
31422                 if (seg == fseg)
31423                 {
31424                     assert (gen != large_object_generation);
31425                     assert (o == generation_allocation_start (generation_of (max_generation)));
31426                     align_const = get_alignment_constant (TRUE);
31427                     o = o + Align(size (o), align_const);
31428                 }
31429
31430                 plug_end = o;
31431                 current_sweep_pos = o;
31432                 next_sweep_obj = o;
31433                 
31434                 allow_fgc();
31435                 end = background_next_end (seg, (gen == large_object_generation));
31436                 dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31437                                 (size_t)heap_segment_mem (seg),
31438                                 (size_t)heap_segment_allocated (seg),
31439                                 (size_t)heap_segment_background_allocated (seg)));
31440             }
31441         }
31442
31443         if ((o < end) && background_object_marked (o, TRUE))
31444         {
31445             plug_start = o;
31446             if (gen == large_object_generation)
31447             {
31448                 dprintf (2, ("loh fr: [%Ix-%Ix[(%Id)", plug_end, plug_start, plug_start-plug_end));
31449             }
31450
31451             thread_gap (plug_end, plug_start-plug_end, gen);
31452             if (gen != large_object_generation)
31453             {
31454                 add_gen_free (max_generation, plug_start-plug_end);
31455                 fix_brick_to_highest (plug_end, plug_start);
31456                 // we need to fix the brick for the next plug here because an FGC can
31457                 // happen and it must not read a stale brick.
31458                 fix_brick_to_highest (plug_start, plug_start);
31459             }
31460
31461             BOOL m = TRUE;
31462
31463             while (m)
31464             {
31465                 next_sweep_obj = o + Align(size (o), align_const);
31466                 current_num_objs++;
31467                 if (current_num_objs >= num_objs)
31468                 {
31469                     current_sweep_pos = next_sweep_obj;
31470
31471                     allow_fgc();
31472                     current_num_objs = 0;
31473                 }
31474
31475                 o = next_sweep_obj;
31476                 if (o >= end)
31477                 {
31478                     break;
31479                 }
31480
31481                 m = background_object_marked (o, TRUE);
31482             }
31483             plug_end = o;
31484             if (gen != large_object_generation)
31485             {
31486                 add_gen_plug (max_generation, plug_end-plug_start);
31487                 dd_survived_size (dd) += (plug_end - plug_start);
31488             }
31489             dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
31490         }
31491         else
31492         {
31493             while ((o < end) && !background_object_marked (o, FALSE))
31494             {
31495                 next_sweep_obj = o + Align(size (o), align_const);
31496                 current_num_objs++;
31497                 if (current_num_objs >= num_objs)
31498                 {
31499                     current_sweep_pos = plug_end;
31500                     dprintf (1234, ("f: swept till %Ix", current_sweep_pos));
31501                     allow_fgc();
31502                     current_num_objs = 0;
31503                 }
31504
31505                 o = next_sweep_obj;
31506             }
31507         }
31508     }
31509
31510     size_t total_loh_size = generation_size (max_generation + 1);
31511     size_t total_soh_size = generation_sizes (generation_of (max_generation));
31512
31513     dprintf (GTC_LOG, ("h%d: S: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
31514
31515     dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id", 
31516         generation_free_list_space (generation_of (max_generation)),
31517         generation_free_obj_space (generation_of (max_generation))));
31518     dprintf (GTC_LOG, ("h%d: end of bgc sweep: gen3 FL: %Id, FO: %Id", 
31519         heap_number,
31520         generation_free_list_space (generation_of (max_generation + 1)),
31521         generation_free_obj_space (generation_of (max_generation + 1))));
31522
31523     FIRE_EVENT(BGC2ndConEnd);
31524     concurrent_print_time_delta ("background sweep");
31525     
31526     heap_segment* reset_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
31527     PREFIX_ASSUME(reset_seg != NULL);
31528
31529     while (reset_seg)
31530     {
31531         heap_segment_saved_bg_allocated (reset_seg) = heap_segment_background_allocated (reset_seg);
31532         heap_segment_background_allocated (reset_seg) = 0;
31533         reset_seg = heap_segment_next_rw (reset_seg);
31534     }
31535
31536     // We calculate dynamic data here because if we wait till we signal the lh event, 
31537     // the allocation thread can change the fragmentation and we may read an intermediate
31538     // value (which can be greater than the generation size). Plus by that time it won't 
31539     // be accurate.
31540     compute_new_dynamic_data (max_generation);
31541
31542     enable_preemptive ();
31543
31544 #ifdef MULTIPLE_HEAPS
31545     bgc_t_join.join(this, gc_join_set_state_free);
31546     if (bgc_t_join.joined())
31547 #endif //MULTIPLE_HEAPS
31548     {
31549         // TODO: We are using this join just to set the state. Should
31550         // look into eliminating it - check to make sure things that use 
31551         // this state can live with per heap state like should_check_bgc_mark.
31552         current_c_gc_state = c_gc_state_free;
31553
31554 #ifdef MULTIPLE_HEAPS
31555         dprintf(2, ("Starting BGC threads after background sweep phase"));
31556         bgc_t_join.restart();
31557 #endif //MULTIPLE_HEAPS
31558     }
31559
31560     disable_preemptive (true);
31561
31562     if (gc_lh_block_event.IsValid())
31563     {
31564         gc_lh_block_event.Set();
31565     }
31566
31567     //dprintf (GTC_LOG, ("---- (GC%d)End Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
31568     dprintf (GTC_LOG, ("---- (GC%d)ESw ----", VolatileLoad(&settings.gc_index)));
31569 }
31570 #endif //BACKGROUND_GC
31571
31572 void gc_heap::sweep_large_objects ()
31573 {
31574     // This min value is for the sake of the dynamic tuning,
31575     // so we know that we are not starving even if we have no
31576     // survivors.
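    // Sweep walks every LOH segment: runs of unmarked objects between marked
    // plugs are threaded onto the free list via thread_gap, empty segments
    // (other than the first) are queued on freeable_large_heap_segment, and
    // the surviving segments are trimmed back to the last plug and their
    // unused pages decommitted.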
31577     generation* gen        = large_object_generation;
31578     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
31579
31580     PREFIX_ASSUME(start_seg != NULL);
31581
31582     heap_segment* seg      = start_seg;
31583     heap_segment* prev_seg = 0;
31584     uint8_t* o             = generation_allocation_start (gen);
31585     int align_const        = get_alignment_constant (FALSE);
31586
31587     //Skip the generation gap object
31588     o = o + Align(size (o), align_const);
31589
31590     uint8_t* plug_end         = o;
31591     uint8_t* plug_start       = o;
31592
31593     generation_allocator (gen)->clear();
31594     generation_free_list_space (gen) = 0;
31595     generation_free_obj_space (gen) = 0;
31596
31597
31598     dprintf (3, ("sweeping large objects"));
31599     dprintf (3, ("seg: %Ix, [%Ix, %Ix[, starting from %Ix", 
31600                  (size_t)seg,
31601                  (size_t)heap_segment_mem (seg),
31602                  (size_t)heap_segment_allocated (seg),
31603                  o));
31604
31605     while (1)
31606     {
31607         if (o >= heap_segment_allocated (seg))
31608         {
31609             heap_segment* next_seg = heap_segment_next (seg);
31610             //delete the empty segment if not the only one
31611             if ((plug_end == heap_segment_mem (seg)) &&
31612                 (seg != start_seg) && !heap_segment_read_only_p (seg))
31613             {
31614                 //prepare for deletion
31615                 dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
31616                 assert (prev_seg);
31617                 heap_segment_next (prev_seg) = next_seg;
31618                 heap_segment_next (seg) = freeable_large_heap_segment;
31619                 freeable_large_heap_segment = seg;
31620             }
31621             else
31622             {
31623                 if (!heap_segment_read_only_p (seg))
31624                 {
31625                     dprintf (3, ("Trimming seg to %Ix[", (size_t)plug_end));
31626                     heap_segment_allocated (seg) = plug_end;
31627                     decommit_heap_segment_pages (seg, 0);
31628                 }
31629                 prev_seg = seg;
31630             }
31631             seg = next_seg;
31632             if (seg == 0)
31633                 break;
31634             else
31635             {
31636                 o = heap_segment_mem (seg);
31637                 plug_end = o;
31638                 dprintf (3, ("seg: %Ix, [%Ix, %Ix[", (size_t)seg,
31639                              (size_t)heap_segment_mem (seg),
31640                              (size_t)heap_segment_allocated (seg)));
31641             }
31642         }
31643         if (large_object_marked(o, TRUE))
31644         {
31645             plug_start = o;
31646             //everything between plug_end and plug_start is free
31647             thread_gap (plug_end, plug_start-plug_end, gen);
31648
31649             BOOL m = TRUE;
31650             while (m)
31651             {
31652                 o = o + AlignQword (size (o));
31653                 if (o >= heap_segment_allocated (seg))
31654                 {
31655                     break;
31656                 }
31657                 m = large_object_marked (o, TRUE);
31658             }
31659             plug_end = o;
31660             dprintf (3, ("plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
31661         }
31662         else
31663         {
31664             while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
31665             {
31666                 o = o + AlignQword (size (o));
31667             }
31668         }
31669     }
31670
31671     generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
31672
31673     PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
31674 }
31675
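// Relocation pass over the large object heap: walks every object on every LOH
// segment, checks for class object demotion, and relocates each reference the
// object contains via reloc_survivor_helper.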
31676 void gc_heap::relocate_in_large_objects ()
31677 {
31678     relocate_args args;
31679     args.low = gc_low;
31680     args.high = gc_high;
31681     args.last_plug = 0;
31682
31683     generation* gen = large_object_generation;
31684
31685     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
31686
31687     PREFIX_ASSUME(seg != NULL);
31688
31689     uint8_t* o = generation_allocation_start (gen);
31690
31691     while (1)
31692     {
31693         if (o >= heap_segment_allocated (seg))
31694         {
31695             seg = heap_segment_next_rw (seg);
31696             if (seg == 0)
31697                 break;
31698             else
31699             {
31700                 o = heap_segment_mem (seg);
31701             }
31702         }
31703         while (o < heap_segment_allocated (seg))
31704         {
31705             check_class_object_demotion (o);
31706             if (contain_pointers (o))
31707             {
31708                 dprintf(3, ("Relocating through large object %Ix", (size_t)o));
31709                 go_through_object_nostart (method_table (o), o, size(o), pval,
31710                         {
31711                             reloc_survivor_helper (pval);
31712                         });
31713             }
31714             o = o + AlignQword (size (o));
31715         }
31716     }
31717 }
31718
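// Card scan for the large object heap: finds set cards on the LOH segments,
// walks the objects they cover and applies 'fn' to references that may point
// into the ephemeral generations. Cards that turn out to contain no such
// references are cleared so later scans can skip them, and (for non-relocating
// scans) generation_skip_ratio is updated with how useful the set cards
// actually were.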
31719 void gc_heap::mark_through_cards_for_large_objects (card_fn fn,
31720                                                     BOOL relocating)
31721 {
31722     uint8_t*      low               = gc_low;
31723     size_t        end_card          = 0;
31724     generation*   oldest_gen        = generation_of (max_generation+1);
31725     heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));
31726
31727     PREFIX_ASSUME(seg != NULL);
31728
31729     uint8_t*      beg               = generation_allocation_start (oldest_gen);
31730     uint8_t*      end               = heap_segment_allocated (seg);
31731
31732     size_t  cg_pointers_found = 0;
31733
31734     size_t  card_word_end = (card_of (align_on_card_word (end)) /
31735                              card_word_width);
31736
31737     size_t      n_eph             = 0;
31738     size_t      n_gen             = 0;
31739     size_t      n_card_set        = 0;
31740     uint8_t*    next_boundary = (relocating ?
31741                               generation_plan_allocation_start (generation_of (max_generation -1)) :
31742                               ephemeral_low);
31743
31744     uint8_t*    nhigh         = (relocating ?
31745                               heap_segment_plan_allocated (ephemeral_heap_segment) :
31746                               ephemeral_high);
31747
31748     BOOL          foundp            = FALSE;
31749     uint8_t*      start_address     = 0;
31750     uint8_t*      limit             = 0;
31751     size_t        card              = card_of (beg);
31752     uint8_t*      o                 = beg;
31753 #ifdef BACKGROUND_GC
31754     BOOL consider_bgc_mark_p        = FALSE;
31755     BOOL check_current_sweep_p      = FALSE;
31756     BOOL check_saved_sweep_p        = FALSE;
31757     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
31758 #endif //BACKGROUND_GC
31759
31760     size_t total_cards_cleared = 0;
31761
31762     //dprintf(3,( "scanning large objects from %Ix to %Ix", (size_t)beg, (size_t)end));
31763     dprintf(3, ("CMl: %Ix->%Ix", (size_t)beg, (size_t)end));
31764     while (1)
31765     {
31766         if ((o < end) && (card_of(o) > card))
31767         {
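            // We just crossed a card boundary. If no interesting cross-generation
            // pointers were found under the cards scanned since 'card', those cards
            // can be cleared so future card scans skip this range.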
31768             dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
31769             if (cg_pointers_found == 0)
31770             {
31771                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)o));
31772                 clear_cards (card, card_of((uint8_t*)o));
31773                 total_cards_cleared += (card_of((uint8_t*)o) - card);
31774             }
31775             n_eph +=cg_pointers_found;
31776             cg_pointers_found = 0;
31777             card = card_of ((uint8_t*)o);
31778         }
31779         if ((o < end) &&(card >= end_card))
31780         {
31781             foundp = find_card (card_table, card, card_word_end, end_card);
31782             if (foundp)
31783             {
31784                 n_card_set+= end_card - card;
31785                 start_address = max (beg, card_address (card));
31786             }
31787             limit = min (end, card_address (end_card));
31788         }
31789         if ((!foundp) || (o >= end) || (card_address (card) >= end))
31790         {
31791             if ((foundp) && (cg_pointers_found == 0))
31792             {
31793                 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
31794                            (size_t)card_address(card+1)));
31795                 clear_cards (card, card+1);
31796                 total_cards_cleared += 1;
31797             }
31798             n_eph +=cg_pointers_found;
31799             cg_pointers_found = 0;
31800             if ((seg = heap_segment_next_rw (seg)) != 0)
31801             {
31802 #ifdef BACKGROUND_GC
31803                 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
31804 #endif //BACKGROUND_GC
31805                 beg = heap_segment_mem (seg);
31806                 end = compute_next_end (seg, low);
31807                 card_word_end = card_of (align_on_card_word (end)) / card_word_width;
31808                 card = card_of (beg);
31809                 o  = beg;
31810                 end_card = 0;
31811                 continue;
31812             }
31813             else
31814             {
31815                 break;
31816             }
31817         }
31818
31819         assert (card_set_p (card));
31820         {
31821             dprintf(3,("card %Ix: o: %Ix, l: %Ix[ ",
31822                        card, (size_t)o, (size_t)limit));
31823
31824             assert (Align (size (o)) >= Align (min_obj_size));
31825             size_t s = size (o);
31826             uint8_t* next_o =  o + AlignQword (s);
31827             Prefetch (next_o);
31828
31829             while (o < limit)
31830             {
31831                 s = size (o);
31832                 assert (Align (s) >= Align (min_obj_size));
31833                 next_o =  o + AlignQword (s);
31834                 Prefetch (next_o);
31835
31836                 dprintf (4, ("|%Ix|", (size_t)o));
31837                 if (next_o < start_address)
31838                 {
31839                     goto end_object;
31840                 }
31841
31842 #ifdef BACKGROUND_GC
31843                 if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
31844                 {
31845                     goto end_object;
31846                 }
31847 #endif //BACKGROUND_GC
31848
31849 #ifdef COLLECTIBLE_CLASS
31850                 if (is_collectible(o))
31851                 {
31852                     BOOL passed_end_card_p = FALSE;
31853
31854                     if (card_of (o) > card)
31855                     {
31856                         passed_end_card_p = card_transition (o, end, card_word_end,
31857                             cg_pointers_found, 
31858                             n_eph, n_card_set,
31859                             card, end_card,
31860                             foundp, start_address,
31861                             limit, total_cards_cleared);
31862                     }
31863
31864                     if ((!passed_end_card_p || foundp) && (card_of (o) == card))
31865                     {
31866                         // card is valid and it covers the head of the object
31867                         if (fn == &gc_heap::relocate_address)
31868                         {
31869                             keep_card_live (o, n_gen, cg_pointers_found);
31870                         }
31871                         else
31872                         {
31873                             uint8_t* class_obj = get_class_object (o);
31874                             mark_through_cards_helper (&class_obj, n_gen,
31875                                                     cg_pointers_found, fn,
31876                                                     nhigh, next_boundary);
31877                         }
31878                     }
31879
31880                     if (passed_end_card_p)
31881                     {
31882                         if (foundp && (card_address (card) < next_o))
31883                         {
31884                             goto go_through_refs;
31885                         }
31886                         else 
31887                         {
31888                             goto end_object;
31889                         }
31890                     }
31891                 }
31892
31893 go_through_refs:
31894 #endif //COLLECTIBLE_CLASS
31895
31896                 if (contain_pointers (o))
31897                 {
31898                     dprintf(3,("Going through %Ix", (size_t)o));
31899
31900                     go_through_object (method_table(o), o, s, poo,
31901                                        start_address, use_start, (o + s),
31902                        {
31903                            if (card_of ((uint8_t*)poo) > card)
31904                            {
31905                                 BOOL passed_end_card_p  = card_transition ((uint8_t*)poo, end,
31906                                         card_word_end,
31907                                         cg_pointers_found, 
31908                                         n_eph, n_card_set,
31909                                         card, end_card,
31910                                         foundp, start_address,
31911                                         limit, total_cards_cleared);
31912
31913                                 if (passed_end_card_p)
31914                                 {
31915                                     if (foundp && (card_address (card) < next_o))
31916                                     {
31917                                         //new_start();
31918                                         {
31919                                             if (ppstop <= (uint8_t**)start_address)
31920                                             {break;}
31921                                             else if (poo < (uint8_t**)start_address)
31922                                             {poo = (uint8_t**)start_address;}
31923                                         }
31924                                     }
31925                                     else
31926                                     {
31927                                         goto end_object;
31928                                     }
31929                                 }
31930                             }
31931
31932                            mark_through_cards_helper (poo, n_gen,
31933                                                       cg_pointers_found, fn,
31934                                                       nhigh, next_boundary);
31935                        }
31936                         );
31937                 }
31938
31939             end_object:
31940                 o = next_o;
31941             }
31942
31943         }
31944     }
31945
31946     // compute the efficiency ratio of the card table
31947     if (!relocating)
31948     {
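        // n_eph counts the cross-generation slots scanned under set cards and n_gen
        // the ones that were actually useful. When enough slots were scanned (> 800),
        // the ratio becomes the useful percentage -- e.g. 150 useful out of 1000
        // scanned gives 15 -- and we keep the minimum observed so far.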
31949         generation_skip_ratio = min (((n_eph > 800) ?
31950                                       (int)(((float)n_gen / (float)n_eph) * 100) : 100),
31951                                      generation_skip_ratio);
31952
31953         dprintf (3, ("Mloh: cross: %Id, useful: %Id, cards cleared: %Id, cards set: %Id, ratio: %d", 
31954              n_eph, n_gen, total_cards_cleared, n_card_set, generation_skip_ratio));
31955     }
31956     else
31957     {
31958         dprintf (3, ("R: Mloh: cross: %Id, useful: %Id, cards set: %Id, ratio: %d", 
31959              n_eph, n_gen, n_card_set, generation_skip_ratio));
31960     }
31961 }
31962
31963 void gc_heap::descr_segment (heap_segment* seg )
31964 {
31965 #ifdef TRACE_GC
31966     uint8_t*  x = heap_segment_mem (seg);
31967     while (x < heap_segment_allocated (seg))
31968     {
31969         dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
31970         x = x + Align(size (x));
31971     }
31972 #else // TRACE_GC
31973     UNREFERENCED_PARAMETER(seg);
31974 #endif // TRACE_GC
31975 }
31976
31977 void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
31978 {
31979 #ifdef MULTIPLE_HEAPS
31980     int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
31981     for (int i = 0; i < n_heaps; i++)
31982     {
31983         gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
31984 #else //MULTIPLE_HEAPS
31985     {
31986         gc_heap* hp = NULL;
31987 #ifdef _PREFAST_
31988         // prefix complains about us dereferencing hp in wks build even though we only access static members
31989         // this way. not sure how to shut it up except for this ugly workaround:
31990         PREFIX_ASSUME(hp != NULL);
31991 #endif // _PREFAST_
31992 #endif //MULTIPLE_HEAPS
31993
31994         int curr_gen_number0 = max_generation+1;
31995         while (curr_gen_number0 >= 0)
31996         {
31997             generation* gen = hp->generation_of (curr_gen_number0);
31998             heap_segment* seg = generation_start_segment (gen);
31999             while (seg && (seg != hp->ephemeral_heap_segment))
32000             {
32001                 assert (curr_gen_number0 > 0);
32002
32003                 // report bounds from heap_segment_mem (seg) to
32004                 // heap_segment_allocated (seg);
32005                 // for generation # curr_gen_number0
32006                 // for heap # heap_no
32007
32008                 fn(context, curr_gen_number0, heap_segment_mem (seg),
32009                                               heap_segment_allocated (seg),
32010                                               curr_gen_number0 == max_generation+1 ? heap_segment_reserved (seg) : heap_segment_allocated (seg));
32011
32012                 seg = heap_segment_next (seg);
32013             }
32014             if (seg)
32015             {
32016                 assert (seg == hp->ephemeral_heap_segment);
32017                 assert (curr_gen_number0 <= max_generation);
32018                 //
32019                 if (curr_gen_number0 == max_generation)
32020                 {
32021                     if (heap_segment_mem (seg) < generation_allocation_start (hp->generation_of (max_generation-1)))
32022                     {
32023                         // report bounds from heap_segment_mem (seg) to
32024                         // generation_allocation_start (generation_of (max_generation-1))
32025                         // for heap # heap_number
32026
32027                         fn(context, curr_gen_number0, heap_segment_mem (seg),
32028                                                       generation_allocation_start (hp->generation_of (max_generation-1)),
32029                                                       generation_allocation_start (hp->generation_of (max_generation-1)) );
32030                     }
32031                 }
32032                 else if (curr_gen_number0 != 0)
32033                 {
32034                     //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
32035                     // to generation_allocation_start (generation_of (curr_gen_number0-1))
32036                     // for heap # heap_number
32037
32038                     fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
32039                                                   generation_allocation_start (hp->generation_of (curr_gen_number0-1)),
32040                                                   generation_allocation_start (hp->generation_of (curr_gen_number0-1)));
32041                 }
32042                 else
32043                 {
32044                     //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
32045                     // to heap_segment_allocated (ephemeral_heap_segment);
32046                     // for heap # heap_number
32047
32048                     fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
32049                                                   heap_segment_allocated (hp->ephemeral_heap_segment),
32050                                                   heap_segment_reserved (hp->ephemeral_heap_segment) );
32051                 }
32052             }
32053             curr_gen_number0--;
32054         }
32055     }
32056 }
32057
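// The walk above reports (context, generation number, range start, range end,
// range reserved) for every live memory range. A minimal sketch of a consumer
// is shown below for illustration only -- the callback and bookkeeping struct
// are hypothetical and assume gen_walk_fn matches the argument order used above.
#if 0 // illustrative sketch, not part of the build
struct walk_totals
{
    size_t used_bytes[8];      // indexed by generation number (0 .. max_generation+1)
    size_t reserved_bytes[8];
};

static void example_gen_walk (void* context, int gen_number,
                              uint8_t* range_start, uint8_t* range_end,
                              uint8_t* range_reserved)
{
    walk_totals* totals = (walk_totals*)context;
    totals->used_bytes[gen_number]     += (size_t)(range_end - range_start);
    totals->reserved_bytes[gen_number] += (size_t)(range_reserved - range_start);
}

// Usage (hypothetical): walk_totals totals = {};
//                       hp->descr_generations_to_profiler (example_gen_walk, &totals);
#endif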
32058 #ifdef TRACE_GC
32059 // Note that when logging is on it can take a long time to go through the free items.
32060 void gc_heap::print_free_list (int gen, heap_segment* seg)
32061 {
32062     UNREFERENCED_PARAMETER(gen);
32063     UNREFERENCED_PARAMETER(seg);
32064 /*
32065     if (settings.concurrent == FALSE)
32066     {
32067         uint8_t* seg_start = heap_segment_mem (seg);
32068         uint8_t* seg_end = heap_segment_allocated (seg);
32069
32070         dprintf (3, ("Free list in seg %Ix:", seg_start));
32071
32072         size_t total_free_item = 0;
32073
32074         allocator* gen_allocator = generation_allocator (generation_of (gen));
32075         for (unsigned int b = 0; b < gen_allocator->number_of_buckets(); b++)
32076         {
32077             uint8_t* fo = gen_allocator->alloc_list_head_of (b);
32078             while (fo)
32079             {
32080                 if (fo >= seg_start && fo < seg_end)
32081                 {
32082                     total_free_item++;
32083
32084                     size_t free_item_len = size(fo);
32085
32086                     dprintf (3, ("[%Ix, %Ix[:%Id",
32087                                  (size_t)fo,
32088                                  (size_t)(fo + free_item_len),
32089                                  free_item_len));
32090                 }
32091
32092                 fo = free_list_slot (fo);
32093             }
32094         }
32095
32096         dprintf (3, ("total %Id free items", total_free_item));
32097     }
32098 */
32099 }
32100 #endif //TRACE_GC
32101
32102 void gc_heap::descr_generations (BOOL begin_gc_p)
32103 {
32104     UNREFERENCED_PARAMETER(begin_gc_p);
32105 #ifdef STRESS_LOG
32106     if (StressLog::StressLogOn(LF_GC, LL_INFO10))
32107     {
32108         gc_heap* hp = 0;
32109 #ifdef MULTIPLE_HEAPS
32110         hp= this;
32111 #endif //MULTIPLE_HEAPS
32112
32113         STRESS_LOG1(LF_GC, LL_INFO10, "GC Heap %p\n", hp);
32114         for (int n = max_generation; n >= 0; --n)
32115         {
32116             STRESS_LOG4(LF_GC, LL_INFO10, "    Generation %d [%p, %p] cur = %p\n",
32117                     n,
32118                     generation_allocation_start(generation_of(n)),
32119                     generation_allocation_limit(generation_of(n)),
32120                     generation_allocation_pointer(generation_of(n)));
32121
32122             heap_segment* seg = generation_start_segment(generation_of(n));
32123             while (seg)
32124             {
32125                 STRESS_LOG4(LF_GC, LL_INFO10, "        Segment mem %p alloc = %p used %p committed %p\n",
32126                         heap_segment_mem(seg),
32127                         heap_segment_allocated(seg),
32128                         heap_segment_used(seg),
32129                         heap_segment_committed(seg));
32130                 seg = heap_segment_next(seg);
32131             }
32132         }
32133     }
32134 #endif  // STRESS_LOG
32135
32136 #ifdef TRACE_GC
32137     dprintf (2, ("lowest_address: %Ix highest_address: %Ix",
32138              (size_t) lowest_address, (size_t) highest_address));
32139 #ifdef BACKGROUND_GC
32140     dprintf (2, ("bgc lowest_address: %Ix bgc highest_address: %Ix",
32141              (size_t) background_saved_lowest_address, (size_t) background_saved_highest_address));
32142 #endif //BACKGROUND_GC
32143
32144     if (heap_number == 0)
32145     {
32146         dprintf (1, ("total heap size: %Id, commit size: %Id", get_total_heap_size(), get_total_committed_size()));
32147     }
32148
32149     int curr_gen_number = max_generation+1;
32150     while (curr_gen_number >= 0)
32151     {
32152         size_t total_gen_size = generation_size (curr_gen_number);
32153 #ifdef SIMPLE_DPRINTF
32154         dprintf (GTC_LOG, ("[%s][g%d]gen %d:, size: %Id, frag: %Id(L: %Id, O: %Id), f: %d%% %s %s %s",
32155                       (begin_gc_p ? "BEG" : "END"),
32156                       settings.condemned_generation,
32157                       curr_gen_number,
32158                       total_gen_size,
32159                       dd_fragmentation (dynamic_data_of (curr_gen_number)),
32160                       generation_free_list_space (generation_of (curr_gen_number)),
32161                       generation_free_obj_space (generation_of (curr_gen_number)),
32162                       (total_gen_size ? 
32163                         (int)(((double)dd_fragmentation (dynamic_data_of (curr_gen_number)) / (double)total_gen_size) * 100) :
32164                         0),
32165                       (begin_gc_p ? ("") : (settings.compaction ? "(compact)" : "(sweep)")),
32166                       (settings.heap_expansion ? "(EX)" : " "),
32167                       (settings.promotion ? "Promotion" : "NoPromotion")));
32168 #else
32169         dprintf (2, ( "Generation %d: gap size: %d, generation size: %Id, fragmentation: %Id",
32170                       curr_gen_number,
32171                       size (generation_allocation_start (generation_of (curr_gen_number))),
32172                       total_gen_size,
32173                       dd_fragmentation (dynamic_data_of (curr_gen_number))));
32174 #endif //SIMPLE_DPRINTF
32175
32176         generation* gen = generation_of (curr_gen_number);
32177         heap_segment* seg = generation_start_segment (gen);
32178         while (seg && (seg != ephemeral_heap_segment))
32179         {
32180             dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)",
32181                         curr_gen_number,
32182                         (size_t)heap_segment_mem (seg),
32183                         (size_t)heap_segment_allocated (seg),
32184                         (size_t)heap_segment_committed (seg),
32185                         (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)),
32186                         (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg))));
32187             print_free_list (curr_gen_number, seg);
32188             seg = heap_segment_next (seg);
32189         }
32190         if (seg && (seg != generation_start_segment (gen)))
32191         {
32192             dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
32193                          curr_gen_number,
32194                          (size_t)heap_segment_mem (seg),
32195                          (size_t)generation_allocation_start (generation_of (curr_gen_number-1))));
32196             print_free_list (curr_gen_number, seg);
32197
32198         }
32199         else if (seg)
32200         {
32201             dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
32202                          curr_gen_number,
32203                          (size_t)generation_allocation_start (generation_of (curr_gen_number)),
32204                          (size_t)(((curr_gen_number == 0)) ?
32205                                   (heap_segment_allocated
32206                                    (generation_start_segment
32207                                     (generation_of (curr_gen_number)))) :
32208                                   (generation_allocation_start
32209                                    (generation_of (curr_gen_number - 1))))
32210                          ));
32211             print_free_list (curr_gen_number, seg);
32212         }
32213         curr_gen_number--;
32214     }
32215
32216 #endif //TRACE_GC
32217 }
32218
32219 #undef TRACE_GC
32220
32221 //#define TRACE_GC
32222
32223 //-----------------------------------------------------------------------------
32224 //
32225 //                                  VM Specific support
32226 //
32227 //-----------------------------------------------------------------------------
32228
32229
32230 #ifdef TRACE_GC
32231
32232  unsigned int PromotedObjectCount  = 0;
32233  unsigned int CreatedObjectCount       = 0;
32234  unsigned int AllocDuration            = 0;
32235  unsigned int AllocCount               = 0;
32236  unsigned int AllocBigCount            = 0;
32237  unsigned int AllocSmallCount      = 0;
32238  unsigned int AllocStart             = 0;
32239 #endif //TRACE_GC
32240
32241 //Static member variables.
32242 VOLATILE(BOOL)    GCHeap::GcInProgress            = FALSE;
32243 //GCTODO
32244 //CMCSafeLock*      GCHeap::fGcLock;
32245 GCEvent            *GCHeap::WaitForGCEvent         = NULL;
32246 //GCTODO
32247 #ifdef TRACE_GC
32248 unsigned int       GCHeap::GcDuration;
32249 #endif //TRACE_GC
32250 unsigned            GCHeap::GcCondemnedGeneration   = 0;
32251 size_t              GCHeap::totalSurvivedSize       = 0;
32252 #ifdef FEATURE_PREMORTEM_FINALIZATION
32253 CFinalize*          GCHeap::m_Finalize              = 0;
32254 BOOL                GCHeap::GcCollectClasses        = FALSE;
32255 VOLATILE(int32_t)      GCHeap::m_GCFLock               = 0;
32256
32257 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
32258 #ifdef STRESS_HEAP
32259 #ifdef BACKGROUND_GC
32260 int                 GCHeap::gc_stress_fgcs_in_bgc   = 0;
32261 #endif // BACKGROUND_GC
32262 #ifndef MULTIPLE_HEAPS
32263 OBJECTHANDLE        GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
32264 int                 GCHeap::m_CurStressObj          = 0;
32265 #endif // !MULTIPLE_HEAPS
32266 #endif // STRESS_HEAP
32267 #endif // FEATURE_REDHAWK
32268
32269 #endif //FEATURE_PREMORTEM_FINALIZATION
32270
32271 class NoGCRegionLockHolder
32272 {
32273 public:
32274     NoGCRegionLockHolder()
32275     {
32276         enter_spin_lock_noinstru(&g_no_gc_lock);
32277     }
32278
32279     ~NoGCRegionLockHolder()
32280     {
32281         leave_spin_lock_noinstru(&g_no_gc_lock);
32282     }
32283 };
32284
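// NoGCRegionLockHolder is a scope guard: the constructor takes g_no_gc_lock and
// the destructor releases it on every path out of the scope. Illustrative
// (hypothetical) usage:
//
//     {
//         NoGCRegionLockHolder lh;    // g_no_gc_lock acquired here
//         ... inspect or update no-GC-region state ...
//     }                               // lock released when lh is destroyed
//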
32285 // An explanation of locking for finalization:
32286 //
32287 // Multiple threads allocate objects.  During the allocation, they are serialized by
32288 // the AllocLock above.  But they release that lock before they register the object
32289 // for finalization.  That's because there is much contention for the alloc lock, but
32290 // finalization is presumed to be a rare case.
32291 //
32292 // So registering an object for finalization must be protected by the FinalizeLock.
32293 //
32294 // There is another logical queue that involves finalization.  When objects registered
32295 // for finalization become unreachable, they are moved from the "registered" queue to
32296 // the "unreachable" queue.  Note that this only happens inside a GC, so no other
32297 // threads can be manipulating either queue at that time.  Once the GC is over and
32298 // threads are resumed, the Finalizer thread will dequeue objects from the "unreachable"
32299 // queue and call their finalizers.  This dequeue operation is also protected with
32300 // the finalize lock.
32301 //
32302 // At first, this seems unnecessary.  Only one thread is ever enqueuing or dequeuing
32303 // on the unreachable queue (either the GC thread during a GC or the finalizer thread
32304 // when a GC is not in progress).  The reason we share a lock with threads enqueuing
32305 // on the "registered" queue is that the "registered" and "unreachable" queues are
32306 // interrelated.
32307 //
32308 // They are actually two regions of a longer list, which can only grow at one end.
32309 // So to enqueue an object to the "registered" list, you actually rotate an unreachable
32310 // object at the boundary between the logical queues, out to the other end of the
32311 // unreachable queue -- where all growing takes place.  Then you move the boundary
32312 // pointer so that the gap we created at the boundary is now on the "registered"
32313 // side rather than the "unreachable" side.  Now the object can be placed into the
32314 // "registered" side at that point.  This is much more efficient than doing moves
32315 // of arbitrarily long regions, but it causes the two queues to require a shared lock.
32316 //
32317 // Notice that Enter/LeaveFinalizeLock is not a GC-aware spin lock.  Instead, it relies
32318 // on the fact that the lock will only be taken for a brief period and that it will
32319 // never provoke or allow a GC while the lock is held.  This is critical.  If the
32320 // FinalizeLock used enter_spin_lock (and thus sometimes enters preemptive mode to
32321 // allow a GC), then the Alloc client would have to GC protect a finalizable object
32322 // to protect against that eventuality.  That is too slow!
32323
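// A minimal sketch of the boundary-rotation trick described above, using a flat
// array and a single boundary index. Illustrative only -- the names and layout
// below are hypothetical and do not reflect the actual CFinalize implementation.
#if 0 // illustrative sketch, not part of the build
struct finalize_queue_sketch
{
    uint8_t** slots;     // [0, boundary) = "registered", [boundary, count) = "unreachable"
    size_t    boundary;  // index of the first unreachable entry
    size_t    count;     // total entries; the list only ever grows at 'count'
    size_t    capacity;

    // Register a finalizable object without shifting the whole unreachable region:
    // rotate the entry at the boundary out to the growing end, then reuse the
    // vacated slot for the new "registered" entry.
    void register_for_finalization (uint8_t* obj)
    {
        assert (count < capacity);
        slots[count] = slots[boundary];  // move the boundary entry to the far end
        count++;
        slots[boundary] = obj;           // vacated slot joins the "registered" side
        boundary++;
    }
};
#endif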
32324
32325
32326 BOOL IsValidObject99(uint8_t *pObject)
32327 {
32328 #ifdef VERIFY_HEAP
32329     if (!((CObjectHeader*)pObject)->IsFree())
32330         ((CObjectHeader *) pObject)->Validate();
32331 #endif //VERIFY_HEAP
32332     return(TRUE);
32333 }
32334
32335 #ifdef BACKGROUND_GC 
32336 BOOL gc_heap::bgc_mark_array_range (heap_segment* seg, 
32337                                     BOOL whole_seg_p,
32338                                     uint8_t** range_beg,
32339                                     uint8_t** range_end)
32340 {
32341     uint8_t* seg_start = heap_segment_mem (seg);
32342     uint8_t* seg_end = (whole_seg_p ? heap_segment_reserved (seg) : align_on_mark_word (heap_segment_allocated (seg)));
32343
32344     if ((seg_start < background_saved_highest_address) &&
32345         (seg_end > background_saved_lowest_address))
32346     {
32347         *range_beg = max (seg_start, background_saved_lowest_address);
32348         *range_end = min (seg_end, background_saved_highest_address);
32349         return TRUE;
32350     }
32351     else
32352     {
32353         return FALSE;
32354     }
32355 }
32356
32357 void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
32358 {
32359 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32360     if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32361     {
32362         uint8_t* range_beg = 0;
32363         uint8_t* range_end = 0;
32364
32365         if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end))
32366         {
32367             size_t  markw = mark_word_of (range_beg);
32368             size_t  markw_end = mark_word_of (range_end);
32369             while (markw < markw_end)
32370             {
32371                 if (mark_array [markw])
32372                 {
32373                     dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
32374                                     markw, mark_array [markw], mark_word_address (markw)));
32375                     FATAL_GC_ERROR();
32376                 }
32377                 markw++;
32378             }
32379             uint8_t* p = mark_word_address (markw_end);
32380             while (p < range_end)
32381             {
32382                 assert (!(mark_array_marked (p)));
32383                 p++;
32384             }
32385         }
32386     }
32387 #endif //VERIFY_HEAP && MARK_ARRAY
32388 }
32389
32390 void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
32391 {
32392 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32393     size_t start_mark_bit = mark_bit_of (obj) + 1;
32394     size_t end_mark_bit = mark_bit_of (obj + s);
32395     unsigned int startbit = mark_bit_bit (start_mark_bit);
32396     unsigned int endbit = mark_bit_bit (end_mark_bit);
32397     size_t startwrd = mark_bit_word (start_mark_bit);
32398     size_t endwrd = mark_bit_word (end_mark_bit);
32399     unsigned int result = 0;
32400
32401     unsigned int firstwrd = ~(lowbits (~0, startbit));
32402     unsigned int lastwrd = ~(highbits (~0, endbit));
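    // firstwrd keeps only the bits at or above startbit in the first mark word,
    // and lastwrd only the bits below endbit in the last one (assuming lowbits /
    // highbits keep the low / high 'bits' bits respectively). For example,
    // startbit == 3 gives firstwrd == 0xFFFFFFF8 and endbit == 5 gives
    // lastwrd == 0x0000001F.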
32403
32404     if (startwrd == endwrd)
32405     {
32406         unsigned int wrd = firstwrd & lastwrd;
32407         result = mark_array[startwrd] & wrd;
32408         if (result)
32409         {
32410             FATAL_GC_ERROR();
32411         }
32412         return;
32413     }
32414
32415     // verify the first mark word is cleared.
32416     if (startbit)
32417     {
32418         result = mark_array[startwrd] & firstwrd;
32419         if (result)
32420         {
32421             FATAL_GC_ERROR();
32422         }
32423         startwrd++;
32424     }
32425
32426     for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
32427     {
32428         result = mark_array[wrdtmp];
32429         if (result)
32430         {
32431             FATAL_GC_ERROR();
32432         }
32433     }
32434
32435     // verify the last mark word is cleared.
32436     if (endbit)
32437     {
32438         result = mark_array[endwrd] & lastwrd;
32439         if (result)
32440         {
32441             FATAL_GC_ERROR();
32442         }
32443     }
32444 #endif //VERIFY_HEAP && MARK_ARRAY
32445 }
32446
32447 void gc_heap::clear_all_mark_array()
32448 {
32449 #ifdef MARK_ARRAY
32450     //size_t num_dwords_written = 0;
32451     //size_t begin_time = GetHighPrecisionTimeStamp();
32452
32453     generation* gen = generation_of (max_generation);
32454     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32455     
32456     while (1)
32457     {
32458         if (seg == 0)
32459         {
32460             if (gen != large_object_generation)
32461             {
32462                 gen = generation_of (max_generation+1);
32463                 seg = heap_segment_rw (generation_start_segment (gen));
32464             }
32465             else
32466             {
32467                 break;
32468             }
32469         }
32470
32471         uint8_t* range_beg = 0;
32472         uint8_t* range_end = 0;
32473
32474         if (bgc_mark_array_range (seg, (seg == ephemeral_heap_segment), &range_beg, &range_end))
32475         { 
32476             size_t markw = mark_word_of (range_beg);
32477             size_t markw_end = mark_word_of (range_end);
32478             size_t size_total = (markw_end - markw) * sizeof (uint32_t);
32479             //num_dwords_written = markw_end - markw;
32480             size_t size = 0;
32481             size_t size_left = 0;
32482
32483             assert (((size_t)&mark_array[markw] & (sizeof(PTR_PTR)-1)) == 0);
32484
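            // Only the pointer-size-aligned prefix of the range is handed to memclr
            // (presumably it wants pointer-sized chunks); any remaining uint32_t-sized
            // tail is cleared explicitly below.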
32485             if ((size_total & (sizeof(PTR_PTR) - 1)) != 0)
32486             {
32487                 size = (size_total & ~(sizeof(PTR_PTR) - 1));
32488                 size_left = size_total - size;
32489                 assert ((size_left & (sizeof (uint32_t) - 1)) == 0);
32490             }
32491             else
32492             {
32493                 size = size_total;
32494             }
32495
32496             memclr ((uint8_t*)&mark_array[markw], size);
32497
32498             if (size_left != 0)
32499             {
32500                 uint32_t* markw_to_clear = &mark_array[markw + size / sizeof (uint32_t)];
32501                 for (size_t i = 0; i < (size_left / sizeof (uint32_t)); i++)
32502                 {
32503                     *markw_to_clear = 0;
32504                     markw_to_clear++;
32505                 }
32506             }
32507         }
32508
32509         seg = heap_segment_next_rw (seg);
32510     }
32511
32512     //size_t end_time = GetHighPrecisionTimeStamp() - begin_time; 
32513
32514     //printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
32515
32516 #endif //MARK_ARRAY
32517 }
32518
32519 #endif //BACKGROUND_GC 
32520
32521 void gc_heap::verify_mark_array_cleared (heap_segment* seg)
32522 {
32523 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32524     assert (card_table == g_gc_card_table);
32525     size_t  markw = mark_word_of (heap_segment_mem (seg));
32526     size_t  markw_end = mark_word_of (heap_segment_reserved (seg));
32527
32528     while (markw < markw_end)
32529     {
32530         if (mark_array [markw])
32531         {
32532             dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
32533                             markw, mark_array [markw], mark_word_address (markw)));
32534             FATAL_GC_ERROR();
32535         }
32536         markw++;
32537     }
32538 #endif //VERIFY_HEAP && MARK_ARRAY
32539 }
32540
32541 void gc_heap::verify_mark_array_cleared ()
32542 {
32543 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32544     if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32545     {
32546         generation* gen = generation_of (max_generation);
32547         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32548         
32549         while (1)
32550         {
32551             if (seg == 0)
32552             {
32553                 if (gen != large_object_generation)
32554                 {
32555                     gen = generation_of (max_generation+1);
32556                     seg = heap_segment_rw (generation_start_segment (gen));
32557                 }
32558                 else
32559                 {
32560                     break;
32561                 }
32562             }
32563
32564             bgc_verify_mark_array_cleared (seg);
32565             seg = heap_segment_next_rw (seg);
32566         }
32567     }
32568 #endif //VERIFY_HEAP && MARK_ARRAY
32569 }
32570
32571 void gc_heap::verify_seg_end_mark_array_cleared()
32572 {
32573 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32574     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32575     {
32576         generation* gen = generation_of (max_generation);
32577         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32578         
32579         while (1)
32580         {
32581             if (seg == 0)
32582             {
32583                 if (gen != large_object_generation)
32584                 {
32585                     gen = generation_of (max_generation+1);
32586                     seg = heap_segment_rw (generation_start_segment (gen));
32587                 }
32588                 else
32589                 {
32590                     break;
32591                 }
32592             }
32593
32594             // We already cleared all mark array bits for ephemeral generations
32595             // at the beginning of bgc sweep
32596             uint8_t* from = ((seg == ephemeral_heap_segment) ?
32597                           generation_allocation_start (generation_of (max_generation - 1)) :
32598                           heap_segment_allocated (seg));
32599             size_t  markw = mark_word_of (align_on_mark_word (from));
32600             size_t  markw_end = mark_word_of (heap_segment_reserved (seg));
32601
32602             while (from < mark_word_address (markw))
32603             {
32604                 if (is_mark_bit_set (from))
32605                 {
32606                     dprintf (3, ("mark bit for %Ix was not cleared", from));
32607                     FATAL_GC_ERROR();
32608                 }
32609
32610                 from += mark_bit_pitch;
32611             }
32612
32613             while (markw < markw_end)
32614             {
32615                 if (mark_array [markw])
32616                 {
32617                     dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared", 
32618                                     markw, mark_array [markw], mark_word_address (markw)));
32619                     FATAL_GC_ERROR();
32620                 }
32621                 markw++;
32622             }
32623             seg = heap_segment_next_rw (seg);
32624         }
32625     }
32626 #endif //VERIFY_HEAP && MARK_ARRAY
32627 }
32628
32629 // This function is called to make sure we don't mess up the segment list
32630 // in SOH. It's called by:
32631 // 1) begin and end of ephemeral GCs
32632 // 2) during bgc sweep when we switch segments.
32633 void gc_heap::verify_soh_segment_list()
32634 {
32635 #ifdef VERIFY_HEAP
32636     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32637     {
32638         generation* gen = generation_of (max_generation);
32639         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32640         heap_segment* last_seg = 0;
32641         while (seg)
32642         {
32643             last_seg = seg;
32644             seg = heap_segment_next_rw (seg);
32645         }
32646         if (last_seg != ephemeral_heap_segment)
32647         {
32648             FATAL_GC_ERROR();
32649         }
32650     }
32651 #endif //VERIFY_HEAP
32652 }
32653
32654 // This function can be called during any foreground GC or blocking GC. For background GCs,
32655 // it can be called at the end of the final marking, and at any point during background
32656 // sweep.
32657 // NOTE - to be able to call this function during background sweep, we need to temporarily 
32658 // NOT clear the mark array bits as we go.
32659 void gc_heap::verify_partial ()
32660 {
32661 #ifdef BACKGROUND_GC
32662     //printf ("GC#%d: Verifying loh during sweep\n", settings.gc_index);
32663     //generation* gen = large_object_generation;
32664     generation* gen = generation_of (max_generation);
32665     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32666     int align_const = get_alignment_constant (gen != large_object_generation);
32667
32668     uint8_t* o = 0;
32669     uint8_t* end = 0;
32670     size_t s = 0;
32671
32672     // Different ways to fail.
32673     BOOL mark_missed_p = FALSE;
32674     BOOL bad_ref_p = FALSE;
32675     BOOL free_ref_p = FALSE;
32676
32677     while (1)
32678     {
32679         if (seg == 0)
32680         {
32681             if (gen != large_object_generation)
32682             {
32683                 //switch to LOH
32684                 gen = large_object_generation;
32685                 align_const = get_alignment_constant (gen != large_object_generation);
32686                 seg = heap_segment_rw (generation_start_segment (gen));
32687                 continue;
32688             }
32689             else
32690             {
32691                 break;
32692             }
32693         }
32694
32695         o = heap_segment_mem (seg);
32696         end  = heap_segment_allocated (seg);
32697         //printf ("validating [%Ix-[%Ix\n", o, end);
32698         while (o < end)
32699         {
32700             s = size (o);
32701
32702             BOOL marked_p = background_object_marked (o, FALSE);
32703
32704             if (marked_p)
32705             {
32706                 go_through_object_cl (method_table (o), o, s, oo,
32707                     {
32708                         if (*oo)
32709                         {
32710                             //dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o));
32711                             MethodTable *pMT = method_table (*oo);
32712
32713                             if (pMT == g_gc_pFreeObjectMethodTable)
32714                             {
32715                                 free_ref_p = TRUE;
32716                                 FATAL_GC_ERROR();
32717                             }
32718
32719                             if (!pMT->SanityCheck()) 
32720                             {
32721                                 bad_ref_p = TRUE;
32722                                 dprintf (3, ("Bad member of %Ix %Ix",
32723                                             (size_t)oo, (size_t)*oo));
32724                                 FATAL_GC_ERROR();
32725                             }
32726
32727                             if (current_bgc_state == bgc_final_marking)
32728                             {
32729                                 if (marked_p && !background_object_marked (*oo, FALSE))
32730                                 {
32731                                     mark_missed_p = TRUE;
32732                                     FATAL_GC_ERROR();
32733                                 }
32734                             }
32735                         }
32736                     }
32737                                     );
32738             }
32739
32740             o = o + Align(s, align_const);
32741         }
32742         seg = heap_segment_next_rw (seg);
32743     }
32744
32745     //printf ("didn't find any large object large enough...\n");
32746     //printf ("finished verifying loh\n");
32747 #endif //BACKGROUND_GC 
32748 }
32749
32750 #ifdef VERIFY_HEAP
32751
32752 void 
32753 gc_heap::verify_free_lists ()
32754 {
32755     for (int gen_num = 0; gen_num <= max_generation+1; gen_num++)
32756     {
32757         dprintf (3, ("Verifying free list for gen:%d", gen_num));
32758         allocator* gen_alloc = generation_allocator (generation_of (gen_num));
32759         size_t sz = gen_alloc->first_bucket_size();
32760         bool verify_undo_slot = (gen_num != 0) && (gen_num != max_generation+1) && !gen_alloc->discard_if_no_fit_p();
32761
32762         for (unsigned int a_l_number = 0; a_l_number < gen_alloc->number_of_buckets(); a_l_number++)
32763         {
32764             uint8_t* free_list = gen_alloc->alloc_list_head_of (a_l_number);
32765             uint8_t* prev = 0;
32766             while (free_list)
32767             {
32768                 if (!((CObjectHeader*)free_list)->IsFree())
32769                 {
32770                     dprintf (3, ("Verifying Heap: curr free list item %Ix isn't a free object",
32771                                  (size_t)free_list));
32772                     FATAL_GC_ERROR();
32773                 }
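                // Bucket invariant being checked: except for the last bucket, items must
                // be smaller than sz, and except for the first bucket they must be at
                // least sz/2 -- i.e. bucket a_l_number holds sizes in [sz/2, sz), with sz
                // doubling per bucket.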
32774                 if (((a_l_number < (gen_alloc->number_of_buckets()-1))&& (unused_array_size (free_list) >= sz))
32775                     || ((a_l_number != 0) && (unused_array_size (free_list) < sz/2)))
32776                 {
32777                     dprintf (3, ("Verifying Heap: curr free list item %Ix isn't in the right bucket",
32778                                  (size_t)free_list));
32779                     FATAL_GC_ERROR();
32780                 }
32781                 if (verify_undo_slot && (free_list_undo (free_list) != UNDO_EMPTY))
32782                 {
32783                     dprintf (3, ("Verifying Heap: curr free list item %Ix has non-empty undo slot",
32784                                  (size_t)free_list));
32785                     FATAL_GC_ERROR();
32786                 }
32787                 if ((gen_num != max_generation+1)&&(object_gennum (free_list)!= gen_num))
32788                 {
32789                     dprintf (3, ("Verifying Heap: curr free list item %Ix is in the wrong generation free list",
32790                                  (size_t)free_list));
32791                     FATAL_GC_ERROR();
32792                 }
32793                     
32794                 prev = free_list;
32795                 free_list = free_list_slot (free_list);
32796             }
32797             //verify the sanity of the tail 
32798             uint8_t* tail = gen_alloc->alloc_list_tail_of (a_l_number);
32799             if (!((tail == 0) || (tail == prev)))
32800             {
32801                 dprintf (3, ("Verifying Heap: tail of free list is not correct"));
32802                 FATAL_GC_ERROR();
32803             }
32804             if (tail == 0)
32805             {
32806                 uint8_t* head = gen_alloc->alloc_list_head_of (a_l_number);
32807                 if ((head != 0) && (free_list_slot (head) != 0))
32808                 {
32809                     dprintf (3, ("Verifying Heap: tail of free list is not correct"));
32810                     FATAL_GC_ERROR();
32811                 }
32812             }
32813
32814             sz *=2;
32815         }
32816     }
32817 }
32818
32819 void
32820 gc_heap::verify_heap (BOOL begin_gc_p)
32821 {
32822     int             heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel());
32823     size_t          last_valid_brick = 0;
32824     BOOL            bCurrentBrickInvalid = FALSE;
32825     BOOL            large_brick_p = TRUE;
32826     size_t          curr_brick = 0;
32827     size_t          prev_brick = (size_t)-1;
32828     int             curr_gen_num = max_generation+1;    
32829     heap_segment*   seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num ) ));
32830
32831     PREFIX_ASSUME(seg != NULL);
32832
32833     uint8_t*        curr_object = heap_segment_mem (seg);
32834     uint8_t*        prev_object = 0;
32835     uint8_t*        begin_youngest = generation_allocation_start(generation_of(0));
32836     uint8_t*        end_youngest = heap_segment_allocated (ephemeral_heap_segment);
32837     uint8_t*        next_boundary = generation_allocation_start (generation_of (max_generation - 1));
32838     int             align_const = get_alignment_constant (FALSE);
32839     size_t          total_objects_verified = 0;
32840     size_t          total_objects_verified_deep = 0;
32841
32842 #ifdef BACKGROUND_GC
32843     BOOL consider_bgc_mark_p    = FALSE;
32844     BOOL check_current_sweep_p  = FALSE;
32845     BOOL check_saved_sweep_p    = FALSE;
32846     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
32847 #endif //BACKGROUND_GC
32848
32849 #ifdef MULTIPLE_HEAPS
32850     t_join* current_join = &gc_t_join;
32851 #ifdef BACKGROUND_GC
32852     if (settings.concurrent && (bgc_thread_id.IsCurrentThread()))
32853     {
32854         // We always call verify_heap on entry of GC on the SVR GC threads.
32855         current_join = &bgc_t_join;
32856     }
32857 #endif //BACKGROUND_GC
32858 #endif //MULTIPLE_HEAPS
32859
32860     UNREFERENCED_PARAMETER(begin_gc_p);
32861 #ifdef BACKGROUND_GC 
32862     dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin", 
32863         (begin_gc_p ? "BEG" : "END"),
32864         VolatileLoad(&settings.gc_index), 
32865         (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
32866 #else
32867     dprintf (2,("[%s]GC#%d: Verifying heap - begin", 
32868                 (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index)));
32869 #endif //BACKGROUND_GC 
32870
32871 #ifndef MULTIPLE_HEAPS
32872     if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
32873         (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
32874     {
32875         FATAL_GC_ERROR();
32876     }
32877 #endif //MULTIPLE_HEAPS
32878
32879 #ifdef BACKGROUND_GC
32880     //don't touch the memory because the program is allocating from it.
32881     if (!settings.concurrent)
32882 #endif //BACKGROUND_GC
32883     {
32884         if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL))
32885         {
32886             //uninit the unused portions of segments.
32887             generation* gen1 = large_object_generation;
32888             heap_segment* seg1 = heap_segment_rw (generation_start_segment (gen1));
32889             PREFIX_ASSUME(seg1 != NULL);
32890
32891             while (1)
32892             {
32893                 if (seg1)
32894                 {
32895                     uint8_t* clear_start = heap_segment_allocated (seg1) - plug_skew;
32896                     if (heap_segment_used (seg1) > clear_start)
32897                     {
32898                         dprintf (3, ("setting end of seg %Ix: [%Ix-[%Ix to 0xaa", 
32899                                     heap_segment_mem (seg1),
32900                                     clear_start ,
32901                                     heap_segment_used (seg1)));
32902                         memset (heap_segment_allocated (seg1) - plug_skew, 0xaa,
32903                             (heap_segment_used (seg1) - clear_start));
32904                     }
32905                     seg1 = heap_segment_next_rw (seg1);
32906                 }
32907                 else
32908                 {
32909                     if (gen1 == large_object_generation)
32910                     {
32911                         gen1 = generation_of (max_generation);
32912                         seg1 = heap_segment_rw (generation_start_segment (gen1));
32913                         PREFIX_ASSUME(seg1 != NULL);
32914                     }
32915                     else
32916                     {
32917                         break;
32918                     }
32919                 }
32920             }
32921         }
32922     }
32923
32924 #ifdef MULTIPLE_HEAPS
32925     current_join->join(this, gc_join_verify_copy_table);
32926     if (current_join->joined())
32927     {
32928         // in concurrent GC, a new segment could be allocated while the GC is working, so the card/brick table might not be updated at this point
32929         for (int i = 0; i < n_heaps; i++)
32930         {
32931             //copy the card and brick tables
32932             if (g_gc_card_table != g_heaps[i]->card_table)
32933             {
32934                 g_heaps[i]->copy_brick_card_table();
32935             }
32936         }
32937
32938         current_join->restart();
32939     }
32940 #else
32941         if (g_gc_card_table != card_table)
32942             copy_brick_card_table();
32943 #endif //MULTIPLE_HEAPS
32944
32945     //verify that the generation structures make sense
32946     {
32947         generation* gen = generation_of (max_generation);
32948
32949         assert (generation_allocation_start (gen) ==
32950                 heap_segment_mem (heap_segment_rw (generation_start_segment (gen))));
32951         int gen_num = max_generation-1;
32952         generation* prev_gen = gen;
32953         while (gen_num >= 0)
32954         {
32955             gen = generation_of (gen_num);
32956             assert (generation_allocation_segment (gen) == ephemeral_heap_segment);
32957             assert (generation_allocation_start (gen) >= heap_segment_mem (ephemeral_heap_segment));
32958             assert (generation_allocation_start (gen) < heap_segment_allocated (ephemeral_heap_segment));
32959
32960             if (generation_start_segment (prev_gen ) ==
32961                 generation_start_segment (gen))
32962             {
32963                 assert (generation_allocation_start (prev_gen) <
32964                         generation_allocation_start (gen));
32965             }
32966             prev_gen = gen;
32967             gen_num--;
32968         }
32969     }
32970
32971     while (1)
32972     {
32973         // Handle segment transitions
32974         if (curr_object >= heap_segment_allocated (seg))
32975         {
32976             if (curr_object > heap_segment_allocated(seg))
32977             {
32978                 dprintf (3, ("Verifying Heap: curr_object: %Ix > heap_segment_allocated (seg: %Ix)",
32979                         (size_t)curr_object, (size_t)seg));
32980                 FATAL_GC_ERROR();
32981             }
32982             seg = heap_segment_next_in_range (seg);
32983             if (seg)
32984             {
32985 #ifdef BACKGROUND_GC
32986                 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
32987 #endif //BACKGROUND_GC
32988                 curr_object = heap_segment_mem(seg);
32989                 prev_object = 0;
32990                 continue;
32991             }
32992             else
32993             {
32994                 if (curr_gen_num == (max_generation+1))
32995                 {
32996                     curr_gen_num--;
32997                     seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num)));
32998
32999                     PREFIX_ASSUME(seg != NULL);
33000
33001 #ifdef BACKGROUND_GC
33002                     should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33003 #endif //BACKGROUND_GC
33004                     curr_object = heap_segment_mem (seg);
33005                     prev_object = 0;
33006                     large_brick_p = FALSE;
33007                     align_const = get_alignment_constant (TRUE);
33008                 }
33009                 else
33010                     break;  // Done Verifying Heap -- no more segments
33011             }
33012         }
33013
33014         // Are we at the end of the youngest_generation?
33015         if (seg == ephemeral_heap_segment)
33016         {
33017             if (curr_object >= end_youngest)
33018             {
33019                 // prev_object length is too long if we hit this int3
33020                 if (curr_object > end_youngest)
33021                 {
33022                     dprintf (3, ("Verifying Heap: curr_object: %Ix > end_youngest: %Ix",
33023                             (size_t)curr_object, (size_t)end_youngest));
33024                     FATAL_GC_ERROR();
33025                 }
33026                 break;
33027             }
33028             
33029             if ((curr_object >= next_boundary) && (curr_gen_num > 0))
33030             {
33031                 curr_gen_num--;
33032                 if (curr_gen_num > 0)
33033                 {
33034                     next_boundary = generation_allocation_start (generation_of (curr_gen_num - 1));
33035                 }
33036             }
33037         }
33038
33039          //if (is_mark_set (curr_object))
33040          //{
33041          //        printf ("curr_object: %Ix is marked!",(size_t)curr_object);
33042          //        FATAL_GC_ERROR();
33043          //}
33044
33045         size_t s = size (curr_object);
33046         dprintf (3, ("o: %Ix, s: %d", (size_t)curr_object, s));
33047         if (s == 0)
33048         {
33049             dprintf (3, ("Verifying Heap: size of current object %Ix == 0", curr_object));
33050             FATAL_GC_ERROR();
33051         }
33052
33053         // If the object is not in the youngest generation, then let's
33054         // verify that the brick table is correct....
33055         if (((seg != ephemeral_heap_segment) ||
33056              (brick_of(curr_object) < brick_of(begin_youngest))))
33057         {
33058             curr_brick = brick_of(curr_object);
33059
33060             // Brick Table Verification...
33061             //
33062             // On brick transition
33063             //     if brick is negative
33064             //          verify that brick indirects to previous valid brick
33065             //     else
33066             //          set current brick invalid flag to be flipped if we
33067             //          encounter an object at the correct place
33068             //
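            // Illustrative sketch (not part of the original comment; brick size is
            // platform-dependent): if an object starts in brick b and spans bricks b+1 and
            // b+2, brick_table[b] holds a positive entry locating that object, while
            // brick_table[b+1] and brick_table[b+2] hold negative offsets (e.g. -1 and -2)
            // that, when added to the brick index in the loop below, walk back to b -- the
            // last brick with a valid positive entry.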
33069             if (curr_brick != prev_brick)
33070             {
33071                 // If the last brick we were examining had a positive
33072                 // entry but we never found the matching object, then
33073                 // we have a problem.
33074                 // If prev_brick was the last one of the segment,
33075                 // it's ok for it to be invalid because it is never looked at.
33076                 if (bCurrentBrickInvalid &&
33077                     (curr_brick != brick_of (heap_segment_mem (seg))) &&
33078                     !heap_segment_read_only_p (seg))
33079                 {
33080                     dprintf (3, ("curr brick %Ix invalid", curr_brick));
33081                     FATAL_GC_ERROR();
33082                 }
33083
33084                 if (large_brick_p)
33085                 {
33086                     //large objects verify the table only if they are in
33087                     //range.
33088                     if ((heap_segment_reserved (seg) <= highest_address) &&
33089                         (heap_segment_mem (seg) >= lowest_address) &&
33090                         brick_table [curr_brick] != 0)
33091                     {
33092                         dprintf (3, ("curr_brick %Ix for large object %Ix not set to -32768",
33093                                 curr_brick, (size_t)curr_object));
33094                         FATAL_GC_ERROR();
33095                     }
33096                     else
33097                     {
33098                         bCurrentBrickInvalid = FALSE;
33099                     }
33100                 }
33101                 else
33102                 {
33103                     // If the current brick contains a negative value, make sure
33104                     // that the indirection terminates at the last valid brick
33105                     if (brick_table [curr_brick] <= 0)
33106                     {
33107                         if (brick_table [curr_brick] == 0)
33108                         {
33109                             dprintf(3, ("curr_brick %Ix for object %Ix set to 0",
33110                                     curr_brick, (size_t)curr_object));
33111                             FATAL_GC_ERROR();
33112                         }
33113                         ptrdiff_t i = curr_brick;
33114                         while ((i >= ((ptrdiff_t) brick_of (heap_segment_mem (seg)))) &&
33115                                (brick_table[i] < 0))
33116                         {
33117                             i = i + brick_table[i];
33118                         }
33119                         if (i <  ((ptrdiff_t)(brick_of (heap_segment_mem (seg))) - 1))
33120                         {
33121                             dprintf (3, ("ptrdiff i: %Ix < brick_of (heap_segment_mem (seg)):%Ix - 1. curr_brick: %Ix",
33122                                     i, brick_of (heap_segment_mem (seg)),
33123                                     curr_brick));
33124                             FATAL_GC_ERROR();
33125                         }
33126                         // if (i != last_valid_brick)
33127                         //  FATAL_GC_ERROR();
33128                         bCurrentBrickInvalid = FALSE;
33129                     }
33130                     else if (!heap_segment_read_only_p (seg))
33131                     {
33132                         bCurrentBrickInvalid = TRUE;
33133                     }
33134                 }
33135             }
33136
33137             if (bCurrentBrickInvalid)
33138             {
33139                 if (curr_object == (brick_address(curr_brick) + brick_table[curr_brick] - 1))
33140                 {
33141                     bCurrentBrickInvalid = FALSE;
33142                     last_valid_brick = curr_brick;
33143                 }
33144             }
33145         }
33146
33147         if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable)
33148         {
33149 #ifdef FEATURE_LOH_COMPACTION
33150             if ((curr_gen_num == (max_generation+1)) && (prev_object != 0))
33151             {
33152                 assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable);
33153             }
33154 #endif //FEATURE_LOH_COMPACTION
33155
33156             total_objects_verified++;
33157
33158             BOOL can_verify_deep = TRUE;
33159 #ifdef BACKGROUND_GC
33160             can_verify_deep = fgc_should_consider_object (curr_object, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p);
33161 #endif //BACKGROUND_GC
33162
33163             BOOL deep_verify_obj = can_verify_deep;
33164             if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
33165                 deep_verify_obj = FALSE;
33166
33167             ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);
33168
33169             if (can_verify_deep)
33170             {
33171                 if (curr_gen_num > 0)
33172                 {
33173                     BOOL need_card_p = FALSE;
33174                     if (contain_pointers_or_collectible (curr_object))
33175                     {
33176                         dprintf (4, ("curr_object: %Ix", (size_t)curr_object));
33177                         size_t crd = card_of (curr_object);
33178                         BOOL found_card_p = card_set_p (crd);
33179
33180 #ifdef COLLECTIBLE_CLASS
33181                         if (is_collectible(curr_object))
33182                         {
33183                             uint8_t* class_obj = get_class_object (curr_object);
33184                             if ((class_obj < ephemeral_high) && (class_obj >= next_boundary))
33185                             {
33186                                 if (!found_card_p)
33187                                 {
33188                                     dprintf (3, ("Card not set, curr_object = [%Ix:%Ix pointing to class object %Ix",
33189                                                 card_of (curr_object), (size_t)curr_object, class_obj));
33190
33191                                     FATAL_GC_ERROR();
33192                                 }
33193                             }
33194                         }
33195 #endif //COLLECTIBLE_CLASS
33196
33197                         if (contain_pointers(curr_object))
33198                         {
33199                             go_through_object_nostart
33200                                 (method_table(curr_object), curr_object, s, oo,
33201                                 {
33202                                     if ((crd != card_of ((uint8_t*)oo)) && !found_card_p)
33203                                     {
33204                                         crd = card_of ((uint8_t*)oo);
33205                                         found_card_p = card_set_p (crd);
33206                                         need_card_p = FALSE;
33207                                     }
33208                                     if ((*oo < ephemeral_high) && (*oo >= next_boundary))
33209                                     {
33210                                         need_card_p = TRUE;
33211                                     }
33212
33213                                     if (need_card_p && !found_card_p)
33214                                     {
33215                                         dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
33216                                                     card_of (curr_object), (size_t)curr_object,
33217                                                     card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
33218                                         FATAL_GC_ERROR();
33219                                     }
33220
33221                                 }
33222                                 );
33223                         }
33224                         if (need_card_p && !found_card_p)
33225                         {
33226                             dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
33227                                     card_of (curr_object), (size_t)curr_object,
33228                                     card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
33229                             FATAL_GC_ERROR();
33230                         }
33231                     }
33232                 }
33233                 total_objects_verified_deep++;
33234             }
33235         }
33236
33237         prev_object = curr_object;
33238         prev_brick = curr_brick;
33239         curr_object = curr_object + Align(s, align_const);
33240         if (curr_object < prev_object)
33241         {
33242             dprintf (3, ("overflow because of a bad object size: %Ix size %Ix", prev_object, s));
33243             FATAL_GC_ERROR();
33244         }
33245     }
33246
33247 #ifdef BACKGROUND_GC
33248     dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id", 
33249                  (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p () ? "FGC" : "NGC")),
33250                  (begin_gc_p ? "BEG" : "END"),
33251                  ((current_c_gc_state == c_gc_state_planning) ? "in plan" : "not in plan"),
33252                  total_objects_verified, total_objects_verified_deep));
33253     if (current_c_gc_state != c_gc_state_planning)
33254     {
33255         assert (total_objects_verified == total_objects_verified_deep);
33256     }
33257 #endif //BACKGROUND_GC
33258     
33259     verify_free_lists();
33260
33261 #ifdef FEATURE_PREMORTEM_FINALIZATION
33262     finalize_queue->CheckFinalizerObjects();
33263 #endif // FEATURE_PREMORTEM_FINALIZATION
33264
33265     {
33266         // to be consistent with handle table APIs pass a ScanContext*
33267         // to provide the heap number.  the SC isn't complete though so
33268         // limit its scope to handle table verification.
33269         ScanContext sc;
33270         sc.thread_number = heap_number;
33271         GCScan::VerifyHandleTable(max_generation, max_generation, &sc);
33272     }
33273
33274 #ifdef MULTIPLE_HEAPS
33275     current_join->join(this, gc_join_verify_objects_done);
33276     if (current_join->joined())
33277 #endif //MULTIPLE_HEAPS
33278     {
33279         SyncBlockCache::GetSyncBlockCache()->VerifySyncTableEntry();
33280 #ifdef MULTIPLE_HEAPS
33281         current_join->restart();
33282 #endif //MULTIPLE_HEAPS
33283     }
33284
33285 #ifdef BACKGROUND_GC 
33286     if (!settings.concurrent)
33287     {
33288         if (current_c_gc_state == c_gc_state_planning)
33289         {
33290             // temporarily commenting this out 'cause an FGC
33291             // could be triggered before we sweep ephemeral.
33292             //verify_seg_end_mark_array_cleared();
33293         }
33294     }
33295
33296     if (settings.concurrent)
33297     {
33298         verify_mark_array_cleared();
33299     }
33300     dprintf (2,("GC%d(%s): Verifying heap - end", 
33301         VolatileLoad(&settings.gc_index), 
33302         (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
33303 #else
33304     dprintf (2,("GC#%d: Verifying heap - end", VolatileLoad(&settings.gc_index)));
33305 #endif //BACKGROUND_GC 
33306 }
33307
33308 #endif  //VERIFY_HEAP
33309
33310
33311 void GCHeap::ValidateObjectMember (Object* obj)
33312 {
33313 #ifdef VERIFY_HEAP
33314     size_t s = size (obj);
33315     uint8_t* o = (uint8_t*)obj;
33316
33317     go_through_object_cl (method_table (obj), o, s, oo,
33318                                 {
33319                                     uint8_t* child_o = *oo;
33320                                     if (child_o)
33321                                     {
33322                                         dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o));
33323                                         MethodTable *pMT = method_table (child_o);
33324                                         assert(pMT);
33325                                         if (!pMT->SanityCheck()) {
33326                                             dprintf (3, ("Bad member of %Ix %Ix",
33327                                                         (size_t)oo, (size_t)child_o));
33328                                             FATAL_GC_ERROR();
33329                                         }
33330                                     }
33331                                 } );
33332 #endif // VERIFY_HEAP
33333 }
33334
33335 void DestructObject (CObjectHeader* hdr)
33336 {
33337     UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
33338     hdr->~CObjectHeader();
33339 }
33340
33341 HRESULT GCHeap::Shutdown ()
33342 {
33343     deleteGCShadow();
33344
33345     GCScan::GcRuntimeStructuresValid (FALSE);
33346
33347     // Cannot assert this, since we use SuspendEE as the mechanism to quiesce all
33348     // threads except the one performing the shutdown.
33349     // ASSERT( !GcInProgress );
33350
33351     // Guard against any more GC occurring and against any threads blocking
33352     // for GC to complete when the GC heap is gone.  This fixes a race condition
33353     // where a thread in GC is destroyed as part of process destruction and
33354     // the remaining threads block for GC complete.
33355
33356     //GCTODO
33357     //EnterAllocLock();
33358     //Enter();
33359     //EnterFinalizeLock();
33360     //SetGCDone();
33361
33362     // during shutdown a lot of threads are suspended
33363     // on this event; we don't want to wake them up just yet
33364     //CloseHandle (WaitForGCEvent);
33365
33366     //find out if the global card table hasn't been used yet
33367     uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
33368     if (card_table_refcount (ct) == 0)
33369     {
33370         destroy_card_table (ct);
33371         g_gc_card_table = nullptr;
33372
33373 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
33374         g_gc_card_bundle_table = nullptr;
33375 #endif
33376 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
33377         SoftwareWriteWatch::StaticClose();
33378 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
33379     }
33380
33381     //destroy all segments on the standby list
33382     while(gc_heap::segment_standby_list != 0)
33383     {
33384         heap_segment* next_seg = heap_segment_next (gc_heap::segment_standby_list);
33385 #ifdef MULTIPLE_HEAPS
33386         (gc_heap::g_heaps[0])->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
33387 #else //MULTIPLE_HEAPS
33388         pGenGCHeap->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
33389 #endif //MULTIPLE_HEAPS
33390         gc_heap::segment_standby_list = next_seg;
33391     }
33392
33393
33394 #ifdef MULTIPLE_HEAPS
33395
33396     for (int i = 0; i < gc_heap::n_heaps; i ++)
33397     {
33398         delete gc_heap::g_heaps[i]->vm_heap;
33399         //destroy pure GC stuff
33400         gc_heap::destroy_gc_heap (gc_heap::g_heaps[i]);
33401     }
33402 #else
33403     gc_heap::destroy_gc_heap (pGenGCHeap);
33404
33405 #endif //MULTIPLE_HEAPS
33406     gc_heap::shutdown_gc();
33407
33408     return S_OK;
33409 }
33410
33411 // Wait until a garbage collection is complete
33412 // returns NOERROR if wait was OK, other error code if failure.
33413 // WARNING: This will not undo the must complete state. If you are
33414 // in a must complete when you call this, you'd better know what you're
33415 // doing.
33416
33417 #ifdef FEATURE_PREMORTEM_FINALIZATION
33418 static
33419 HRESULT AllocateCFinalize(CFinalize **pCFinalize)
33420 {
33421     *pCFinalize = new (nothrow) CFinalize();
33422     if (*pCFinalize == NULL || !(*pCFinalize)->Initialize())
33423         return E_OUTOFMEMORY;
33424
33425     return S_OK;
33426 }
33427 #endif // FEATURE_PREMORTEM_FINALIZATION
33428
33429 // init the instance heap
33430 HRESULT GCHeap::Init(size_t hn)
33431 {
33432     HRESULT hres = S_OK;
33433
33434 #ifdef MULTIPLE_HEAPS
33435     if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
33436         hres = E_OUTOFMEMORY;
33437 #else
33438     UNREFERENCED_PARAMETER(hn);
33439     if (!gc_heap::make_gc_heap())
33440         hres = E_OUTOFMEMORY;
33441 #endif //MULTIPLE_HEAPS
33442
33443     // Return S_OK on success or E_OUTOFMEMORY on failure.
33444     return hres;
33445 }
33446
33447 //System wide initialization
33448 HRESULT GCHeap::Initialize ()
33449 {
33450     HRESULT hr = S_OK;
33451
33452     g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable();
33453     g_num_processors = GCToOSInterface::GetTotalProcessorCount();
33454     assert(g_num_processors != 0);
33455
33456 //Initialize the static members.
33457 #ifdef TRACE_GC
33458     GcDuration = 0;
33459     CreatedObjectCount = 0;
33460 #endif //TRACE_GC
33461
33462     size_t seg_size = get_valid_segment_size();
33463     gc_heap::soh_segment_size = seg_size;
33464     size_t large_seg_size = get_valid_segment_size(TRUE);
33465     gc_heap::min_loh_segment_size = large_seg_size;
33466     gc_heap::min_segment_size = min (seg_size, large_seg_size);
33467 #ifdef SEG_MAPPING_TABLE
33468     gc_heap::min_segment_size_shr = index_of_set_bit (gc_heap::min_segment_size);
33469 #endif //SEG_MAPPING_TABLE
33470
33471 #ifdef MULTIPLE_HEAPS
33472     if (GCConfig::GetNoAffinitize())
33473         gc_heap::gc_thread_no_affinitize_p = true;
33474
33475     uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
33476     
33477     uint32_t nhp_from_process = GCToOSInterface::GetCurrentProcessCpuCount();
33478
33479     uint32_t nhp = ((nhp_from_config == 0) ? nhp_from_process :
33480                                              (min (nhp_from_config, nhp_from_process)));
33481
33482     nhp = min (nhp, MAX_SUPPORTED_CPUS);
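    // For example (illustrative): with a configured heap count of 4 (GCConfig::GetHeapCount())
    // on a process allowed 12 CPUs, nhp = min (4, 12) = 4; with no configured count it
    // defaults to the process CPU count, and in all cases it is capped at MAX_SUPPORTED_CPUS.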
33483
33484     hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
33485 #else
33486     hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
33487 #endif //MULTIPLE_HEAPS
33488
33489     if (hr != S_OK)
33490         return hr;
33491
33492     gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit();
33493
33494     gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
33495 #ifndef MULTIPLE_HEAPS
33496     gc_heap::mem_one_percent /= g_num_processors;
33497 #endif //!MULTIPLE_HEAPS
33498
33499     // We should only use this if we are in the "many process" mode which really is only applicable
33500     // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory. 
33501     // For now I am using an estimate to calculate these numbers but this should really be obtained 
33502     // programmatically going forward.
33503     // I am assuming 47 processes using WKS GC and 3 using SVR GC.
33504     // I am assuming 3 in part because the "very high memory load" threshold is 97%.
33505     int available_mem_th = 10;
33506     if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
33507     {
33508         int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_num_processors));
33509         available_mem_th = min (available_mem_th, adjusted_available_mem_th);
33510     }
33511
33512     gc_heap::high_memory_load_th = 100 - available_mem_th;
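    // Worked example (illustrative, not from the original source): on a machine with 128GB
    // of RAM and 64 logical processors, adjusted_available_mem_th = 3 + (int)(47.0f / 64) = 3,
    // so available_mem_th = min (10, 3) = 3 and high_memory_load_th becomes 97.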
33513
33514 #if defined(BIT64) 
33515     gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent;
33516 #endif // BIT64
33517
33518     WaitForGCEvent = new (nothrow) GCEvent;
33519
33520     if (!WaitForGCEvent)
33521     {
33522         return E_OUTOFMEMORY;
33523     }
33524
33525     if (!WaitForGCEvent->CreateManualEventNoThrow(TRUE))
33526     {
33527         return E_FAIL;
33528     }
33529
33530 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
33531 #if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
33532     if (GCStress<cfg_any>::IsEnabled())  {
33533         for(int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++)
33534             m_StressObjs[i] = CreateGlobalHandle(0);
33535         m_CurStressObj = 0;
33536     }
33537 #endif //STRESS_HEAP && !MULTIPLE_HEAPS
33538 #endif // FEATURE_REDHAWK
33539
33540     initGCShadow();         // If we are debugging write barriers, initialize heap shadow
33541
33542 #ifdef MULTIPLE_HEAPS
33543
33544     for (unsigned i = 0; i < nhp; i++)
33545     {
33546         GCHeap* Hp = new (nothrow) GCHeap();
33547         if (!Hp)
33548             return E_OUTOFMEMORY;
33549
33550         if ((hr = Hp->Init (i))!= S_OK)
33551         {
33552             return hr;
33553         }
33554     }
33555     // initialize numa node to heap map
33556     heap_select::init_numa_node_to_heap_map(nhp);
33557 #else
33558     hr = Init (0);
33559 #endif //MULTIPLE_HEAPS
33560
33561     if (hr == S_OK)
33562     {
33563         GCScan::GcRuntimeStructuresValid (TRUE);
33564
33565         GCToEEInterface::DiagUpdateGenerationBounds();
33566     }
33567
33568     return hr;
33569 };
33570
33571 ////
33572 // GC callback functions
33573 bool GCHeap::IsPromoted(Object* object)
33574 {
33575 #ifdef _DEBUG
33576     ((CObjectHeader*)object)->Validate();
33577 #endif //_DEBUG
33578
33579     uint8_t* o = (uint8_t*)object;
33580
33581     if (gc_heap::settings.condemned_generation == max_generation)
33582     {
33583 #ifdef MULTIPLE_HEAPS
33584         gc_heap* hp = gc_heap::g_heaps[0];
33585 #else
33586         gc_heap* hp = pGenGCHeap;
33587 #endif //MULTIPLE_HEAPS
33588
33589 #ifdef BACKGROUND_GC
33590         if (gc_heap::settings.concurrent)
33591         {
33592             bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
33593                             hp->background_marked (o));
33594             return is_marked;
33595         }
33596         else
33597 #endif //BACKGROUND_GC
33598         {
33599             return (!((o < hp->highest_address) && (o >= hp->lowest_address))
33600                     || hp->is_mark_set (o));
33601         }
33602     }
33603     else
33604     {
33605         gc_heap* hp = gc_heap::heap_of (o);
33606         return (!((o < hp->gc_high) && (o >= hp->gc_low))
33607                 || hp->is_mark_set (o));
33608     }
33609 }
33610
33611 size_t GCHeap::GetPromotedBytes(int heap_index)
33612 {
33613 #ifdef BACKGROUND_GC
33614     if (gc_heap::settings.concurrent)
33615     {
33616         return gc_heap::bpromoted_bytes (heap_index);
33617     }
33618     else
33619 #endif //BACKGROUND_GC
33620     {
33621         return gc_heap::promoted_bytes (heap_index);
33622     }
33623 }
33624
33625 unsigned int GCHeap::WhichGeneration (Object* object)
33626 {
33627     gc_heap* hp = gc_heap::heap_of ((uint8_t*)object);
33628     unsigned int g = hp->object_gennum ((uint8_t*)object);
33629     dprintf (3, ("%Ix is in gen %d", (size_t)object, g));
33630     return g;
33631 }
33632
33633 bool GCHeap::IsEphemeral (Object* object)
33634 {
33635     uint8_t* o = (uint8_t*)object;
33636     gc_heap* hp = gc_heap::heap_of (o);
33637     return !!hp->ephemeral_pointer_p (o);
33638 }
33639
33640 // Return NULL if we can't find the next object. When the EE is not suspended,
33641 // the result is not accurate: if the input arg is in gen0, the function could
33642 // return zeroed-out memory as the next object.
33643 Object * GCHeap::NextObj (Object * object)
33644 {
33645 #ifdef VERIFY_HEAP
33646     uint8_t* o = (uint8_t*)object;
33647
33648 #ifndef FEATURE_BASICFREEZE
33649     if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address)))
33650     {
33651         return NULL;
33652     }
33653 #endif //!FEATURE_BASICFREEZE
33654
33655     heap_segment * hs = gc_heap::find_segment (o, FALSE);
33656     if (!hs)
33657     {
33658         return NULL;
33659     }
33660
33661     BOOL large_object_p = heap_segment_loh_p (hs);
33662     if (large_object_p)
33663         return NULL; //could be racing with another core allocating. 
33664 #ifdef MULTIPLE_HEAPS
33665     gc_heap* hp = heap_segment_heap (hs);
33666 #else //MULTIPLE_HEAPS
33667     gc_heap* hp = 0;
33668 #endif //MULTIPLE_HEAPS
33669     unsigned int g = hp->object_gennum ((uint8_t*)object);
33670     if ((g == 0) && hp->settings.demotion)
33671         return NULL;//could be racing with another core allocating. 
33672     int align_const = get_alignment_constant (!large_object_p);
33673     uint8_t* nextobj = o + Align (size (o), align_const);
33674     if (nextobj <= o) // either overflow or 0 sized object.
33675     {
33676         return NULL;
33677     }
33678
33679     if ((nextobj < heap_segment_mem(hs)) || 
33680         (nextobj >= heap_segment_allocated(hs) && hs != hp->ephemeral_heap_segment) || 
33681         (nextobj >= hp->alloc_allocated))
33682     {
33683         return NULL;
33684     }
33685
33686     return (Object *)nextobj;
33687 #else
33688     return nullptr;
33689 #endif // VERIFY_HEAP
33690 }
33691
33692 #ifdef VERIFY_HEAP
33693
33694 #ifdef FEATURE_BASICFREEZE
33695 BOOL GCHeap::IsInFrozenSegment (Object * object)
33696 {
33697     uint8_t* o = (uint8_t*)object;
33698     heap_segment * hs = gc_heap::find_segment (o, FALSE);
33699     //We create a frozen object for each frozen segment before the segment is inserted
33700     //into the segment list; during ngen, we could also create frozen objects in segments
33701     //which don't belong to the current GC heap.
33702     //So we return true if hs is NULL. This might leave a hole in detecting an invalid
33703     //object, but given all the other checks present, the hole should be very small.
33704     return !hs || heap_segment_read_only_p (hs);
33705 }
33706 #endif //FEATURE_BASICFREEZE
33707
33708 #endif //VERIFY_HEAP
33709
33710 // returns TRUE if the pointer is in one of the GC heaps.
33711 bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
33712 {
33713     STATIC_CONTRACT_SO_TOLERANT;
33714
33715     // removed STATIC_CONTRACT_CAN_TAKE_LOCK here because find_segment 
33716     // no longer calls GCEvent::Wait which eventually takes a lock.
33717
33718     uint8_t* object = (uint8_t*) vpObject;
33719 #ifndef FEATURE_BASICFREEZE
33720     if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address)))
33721         return FALSE;
33722 #endif //!FEATURE_BASICFREEZE
33723
33724     heap_segment * hs = gc_heap::find_segment (object, small_heap_only);
33725     return !!hs;
33726 }
33727
33728 #ifdef STRESS_PINNING
33729 static int n_promote = 0;
33730 #endif //STRESS_PINNING
33731 // promote an object
33732 void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
33733 {
33734     THREAD_NUMBER_FROM_CONTEXT;
33735 #ifndef MULTIPLE_HEAPS
33736     const int thread = 0;
33737 #endif //!MULTIPLE_HEAPS
33738
33739     uint8_t* o = (uint8_t*)*ppObject;
33740
33741     if (o == 0)
33742         return;
33743
33744 #ifdef DEBUG_DestroyedHandleValue
33745     // we can race with destroy handle during concurrent scan
33746     if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
33747         return;
33748 #endif //DEBUG_DestroyedHandleValue
33749
33750     HEAP_FROM_THREAD;
33751
33752     gc_heap* hp = gc_heap::heap_of (o);
33753
33754     dprintf (3, ("Promote %Ix", (size_t)o));
33755
33756 #ifdef INTERIOR_POINTERS
33757     if (flags & GC_CALL_INTERIOR)
33758     {
33759         if ((o < hp->gc_low) || (o >= hp->gc_high))
33760         {
33761             return;
33762         }
33763         if ( (o = hp->find_object (o, hp->gc_low)) == 0)
33764         {
33765             return;
33766         }
33767
33768     }
33769 #endif //INTERIOR_POINTERS
33770
33771 #ifdef FEATURE_CONSERVATIVE_GC
33772     // For conservative GC, a value on the stack may point to the middle of a free object.
33773     // In this case, we don't need to promote the pointer.
33774     if (GCConfig::GetConservativeGC()
33775         && ((CObjectHeader*)o)->IsFree())
33776     {
33777         return;
33778     }
33779 #endif
33780
33781 #ifdef _DEBUG
33782     ((CObjectHeader*)o)->ValidatePromote(sc, flags);
33783 #else 
33784     UNREFERENCED_PARAMETER(sc);
33785 #endif //_DEBUG
33786
33787     if (flags & GC_CALL_PINNED)
33788         hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
33789
33790 #ifdef STRESS_PINNING
33791     if ((++n_promote % 20) == 1)
33792             hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
33793 #endif //STRESS_PINNING
33794
33795 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
33796     size_t promoted_size_begin = hp->promoted_bytes (thread);
33797 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
33798
33799     if ((o >= hp->gc_low) && (o < hp->gc_high))
33800     {
33801         hpt->mark_object_simple (&o THREAD_NUMBER_ARG);
33802     }
33803
33804 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
33805     size_t promoted_size_end = hp->promoted_bytes (thread);
33806     if (g_fEnableARM)
33807     {
33808         if (sc->pCurrentDomain)
33809         {
33810             sc->pCurrentDomain->RecordSurvivedBytes ((promoted_size_end - promoted_size_begin), thread);
33811         }
33812     }
33813 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
33814
33815     STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? header(o)->GetMethodTable() : NULL);
33816 }
33817
33818 void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
33819                        uint32_t flags)
33820 {
33821     UNREFERENCED_PARAMETER(sc);
33822
33823     uint8_t* object = (uint8_t*)(Object*)(*ppObject);
33824     
33825     THREAD_NUMBER_FROM_CONTEXT;
33826
33827     //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject));
33828     dprintf (3, ("R: %Ix", (size_t)ppObject));
33829     
33830     if (object == 0)
33831         return;
33832
33833     gc_heap* hp = gc_heap::heap_of (object);
33834
33835 #ifdef _DEBUG
33836     if (!(flags & GC_CALL_INTERIOR))
33837     {
33838         // We cannot validate this object if it's in the condemned gen because it could 
33839         // be one of the objects that were overwritten by an artificial gap due to a pinned plug.
33840         if (!((object >= hp->gc_low) && (object < hp->gc_high)))
33841         {
33842             ((CObjectHeader*)object)->Validate(FALSE);
33843         }
33844     }
33845 #endif //_DEBUG
33846
33847     dprintf (3, ("Relocate %Ix\n", (size_t)object));
33848
33849     uint8_t* pheader;
33850
33851     if ((flags & GC_CALL_INTERIOR) && gc_heap::settings.loh_compaction)
33852     {
33853         if (!((object >= hp->gc_low) && (object < hp->gc_high)))
33854         {
33855             return;
33856         }
33857
33858         if (gc_heap::loh_object_p (object))
33859         {
33860             pheader = hp->find_object (object, 0);
33861             if (pheader == 0)
33862             {
33863                 return;
33864             }
33865
33866             ptrdiff_t ref_offset = object - pheader;
33867             hp->relocate_address(&pheader THREAD_NUMBER_ARG);
33868             *ppObject = (Object*)(pheader + ref_offset);
33869             return;
33870         }
33871     }
33872
33873     {
33874         pheader = object;
33875         hp->relocate_address(&pheader THREAD_NUMBER_ARG);
33876         *ppObject = (Object*)pheader;
33877     }
33878
33879     STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0));
33880 }
33881
33882 /*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj)
33883 {
33884     // For now we simply look at the size of the object to determine if it is in the
33885     // fixed heap or not. If the bit indicating this gets set at some point
33886     // we should key off that instead.
33887     return size( pObj ) >= LARGE_OBJECT_SIZE;
33888 }
33889
33890 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
33891 #ifdef STRESS_HEAP
33892
33893 void StressHeapDummy ();
33894
33895 static int32_t GCStressStartCount = -1;
33896 static int32_t GCStressCurCount = 0;
33897 static int32_t GCStressStartAtJit = -1;
33898
33899 // the maximum number of foreground GCs we'll induce during one BGC
33900 // (this number does not include "naturally" occurring GCs).
33901 static int32_t GCStressMaxFGCsPerBGC = -1;
33902
33903 // The CLRRandom implementation can produce FPU exceptions if
33904 // the test/application run by the CLR enables any FPU exceptions.
33905 // We want to avoid any unexpected exception coming from the stress
33906 // infrastructure, so CLRRandom is not an option.
33907 // The code below is a replica of the CRT rand() implementation.
33908 // Using CRT rand() itself is not an option because we would interfere with a user
33909 // application that may also use it.
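// (For reference: the constants 214013 and 2531011 and the 15-bit result below are the
// classic parameters of the MSVC CRT's linear congruential rand() generator.)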
33910 int StressRNG(int iMaxValue)
33911 {
33912     static BOOL bisRandInit = FALSE;
33913     static int lHoldrand = 1L;
33914
33915     if (!bisRandInit)
33916     {
33917         lHoldrand = (int)time(NULL);
33918         bisRandInit = TRUE;
33919     }
33920     int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff);
33921     return randValue % iMaxValue;
33922 }
33923 #endif // STRESS_HEAP
33924 #endif // !FEATURE_REDHAWK
33925
33926 // free up object so that things will move and then do a GC
33927 //return TRUE if GC actually happens, otherwise FALSE
33928 bool GCHeap::StressHeap(gc_alloc_context * context)
33929 {
33930 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
33931     alloc_context* acontext = static_cast<alloc_context*>(context);
33932     assert(context != nullptr);
33933
33934     // if GC stress was dynamically disabled during this run we return FALSE
33935     if (!GCStressPolicy::IsEnabled())
33936         return FALSE;
33937
33938 #ifdef _DEBUG
33939     if (g_pConfig->FastGCStressLevel() && !GCToEEInterface::GetThread()->StressHeapIsEnabled()) {
33940         return FALSE;
33941     }
33942
33943 #endif //_DEBUG
33944
33945     if ((g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_UNIQUE)
33946 #ifdef _DEBUG
33947         || g_pConfig->FastGCStressLevel() > 1
33948 #endif //_DEBUG
33949         ) {
33950         if (!Thread::UniqueStack(&acontext)) {
33951             return FALSE;
33952         }
33953     }
33954
33955 #ifdef BACKGROUND_GC
33956         // don't trigger a GC from the GC threads but still trigger GCs from user threads.
33957         if (GCToEEInterface::WasCurrentThreadCreatedByGC())
33958         {
33959             return FALSE;
33960         }
33961 #endif //BACKGROUND_GC
33962
33963         if (GCStressStartAtJit == -1 || GCStressStartCount == -1)
33964         {
33965             GCStressStartCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStressStart);
33966             GCStressStartAtJit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStartAtJit);
33967         }
33968
33969         if (GCStressMaxFGCsPerBGC == -1)
33970         {
33971             GCStressMaxFGCsPerBGC = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMaxFGCsPerBGC);
33972             if (g_pConfig->IsGCStressMix() && GCStressMaxFGCsPerBGC == -1)
33973                 GCStressMaxFGCsPerBGC = 6;
33974         }
33975
33976 #ifdef _DEBUG
33977         if (g_JitCount < GCStressStartAtJit)
33978             return FALSE;
33979 #endif //_DEBUG
33980
33981         // Allow programmer to skip the first N Stress GCs so that you can
33982         // get to the interesting ones faster.
33983         Interlocked::Increment(&GCStressCurCount);
33984         if (GCStressCurCount < GCStressStartCount)
33985             return FALSE;
33986
33987         // throttle the number of stress-induced GCs by a factor given by GCStressStep
33988         if ((GCStressCurCount % g_pConfig->GetGCStressStep()) != 0)
33989         {
33990             return FALSE;
33991         }
33992
33993 #ifdef BACKGROUND_GC
33994         if (IsConcurrentGCEnabled() && IsConcurrentGCInProgress())
33995         {
33996             // allow a maximum number of stress induced FGCs during one BGC
33997             if (gc_stress_fgcs_in_bgc >= GCStressMaxFGCsPerBGC)
33998                 return FALSE;
33999             ++gc_stress_fgcs_in_bgc;
34000         }
34001 #endif // BACKGROUND_GC
34002
34003     if (g_pStringClass == 0)
34004     {
34005         // If the String class has not been loaded, don't do any stressing. This should
34006         // be kept to a minimum to get as complete coverage as possible.
34007         _ASSERTE(g_fEEInit);
34008         return FALSE;
34009     }
34010
34011 #ifndef MULTIPLE_HEAPS
34012     static int32_t OneAtATime = -1;
34013
34014     // Only bother with this if the stress level is big enough and if nobody else is
34015     // doing it right now.  Note that some callers are inside the AllocLock and are
34016     // guaranteed synchronized.  But others are using AllocationContexts and have no
34017     // particular synchronization.
34018     //
34019     // For this latter case, we want a very high-speed way of limiting this to one
34020     // at a time.  A secondary advantage is that we release part of our StressObjs
34021     // buffer sparingly but just as effectively.
34022
34023     if (Interlocked::Increment(&OneAtATime) == 0 &&
34024         !TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
34025     {
34026         StringObject* str;
34027
34028         // If the current string is used up
34029         if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0)
34030         {
34031             // Populate handles with strings
34032             int i = m_CurStressObj;
34033             while(HndFetchHandle(m_StressObjs[i]) == 0)
34034             {
34035                 _ASSERTE(m_StressObjs[i] != 0);
34036                 unsigned strLen = (LARGE_OBJECT_SIZE - 32) / sizeof(WCHAR);
34037                 unsigned strSize = PtrAlign(StringObject::GetSize(strLen));
34038                 
34039                 // update the cached type handle before allocating
34040                 SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
34041                 str = (StringObject*) pGenGCHeap->allocate (strSize, acontext);
34042                 if (str)
34043                 {
34044                     str->SetMethodTable (g_pStringClass);
34045                     str->SetStringLength (strLen);
34046
34047                     HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
34048                 }
34049                 i = (i + 1) % NUM_HEAP_STRESS_OBJS;
34050                 if (i == m_CurStressObj) break;
34051             }
34052
34053             // advance the current handle to the next string
34054             m_CurStressObj = (m_CurStressObj + 1) % NUM_HEAP_STRESS_OBJS;
34055         }
34056
34057         // Get the current string
34058         str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj]));
34059         if (str)
34060         {
34061             // Chop off the end of the string and form a new object out of it.
34062             // This will 'free' an object at the beginning of the heap, which will
34063             // force data movement.  Note that we can only do this so many times
34064             // before we have to move on to the next string.
34065             unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31);
34066             if (str->GetStringLength() > sizeOfNewObj / sizeof(WCHAR))
34067             {
34068                 unsigned sizeToNextObj = (unsigned)Align(size(str));
34069                 uint8_t* freeObj = ((uint8_t*) str) + sizeToNextObj - sizeOfNewObj;
34070                 pGenGCHeap->make_unused_array (freeObj, sizeOfNewObj);                    
34071                 str->SetStringLength(str->GetStringLength() - (sizeOfNewObj / sizeof(WCHAR)));
34072             }
34073             else
34074             {
34075                 // Let the string itself become garbage.
34076                 // will be realloced next time around
34077                 HndAssignHandle(m_StressObjs[m_CurStressObj], 0);
34078             }
34079         }
34080     }
34081     Interlocked::Decrement(&OneAtATime);
34082 #endif // !MULTIPLE_HEAPS
34083     if (IsConcurrentGCEnabled())
34084     {
34085         int rgen = StressRNG(10);
34086
34087         // gen0:gen1:gen2 distribution: 40:40:20
34088         if (rgen >= 8)
34089             rgen = 2;
34090         else if (rgen >= 4)
34091             rgen = 1;
34092         else
34093             rgen = 0;
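        // (StressRNG(10) yields a value in [0, 10): 8-9 map to gen2 (20%), 4-7 to gen1 (40%),
        // and 0-3 to gen0 (40%), matching the distribution noted above.)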
34094
34095         GarbageCollectTry (rgen, FALSE, collection_gcstress);
34096     }
34097     else
34098     {
34099         GarbageCollect(max_generation, FALSE, collection_gcstress);
34100     }
34101
34102     return TRUE;
34103 #else
34104     UNREFERENCED_PARAMETER(context);
34105     return FALSE;
34106 #endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
34107 }
34108
34109
34110 #ifdef FEATURE_PREMORTEM_FINALIZATION
34111 #define REGISTER_FOR_FINALIZATION(_object, _size) \
34112     hp->finalize_queue->RegisterForFinalization (0, (_object), (_size))
34113 #else // FEATURE_PREMORTEM_FINALIZATION
34114 #define REGISTER_FOR_FINALIZATION(_object, _size) true
34115 #endif // FEATURE_PREMORTEM_FINALIZATION
34116
34117 #define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do {  \
34118     if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size)))   \
34119     {                                                                                       \
34120         STRESS_LOG_OOM_STACK(_size);                                                        \
34121         return NULL;                                                                        \
34122     }                                                                                       \
34123 } while (false)
34124
34125 //
34126 // Small Object Allocator
34127 //
34128 //
34129 // Allocate small object with an alignment requirement of 8-bytes.
34130 Object*
34131 GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
34132 {
34133 #ifdef FEATURE_64BIT_ALIGNMENT
34134     CONTRACTL {
34135         NOTHROW;
34136         GC_TRIGGERS;
34137     } CONTRACTL_END;
34138
34139     alloc_context* acontext = static_cast<alloc_context*>(ctx);
34140
34141 #ifdef MULTIPLE_HEAPS
34142     if (acontext->get_alloc_heap() == 0)
34143     {
34144         AssignHeap (acontext);
34145         assert (acontext->get_alloc_heap());
34146     }
34147
34148     gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
34149 #else
34150     gc_heap* hp = pGenGCHeap;
34151 #endif //MULTIPLE_HEAPS
34152
34153     return AllocAlign8Common(hp, acontext, size, flags);
34154 #else
34155     UNREFERENCED_PARAMETER(ctx);
34156     UNREFERENCED_PARAMETER(size);
34157     UNREFERENCED_PARAMETER(flags);
34158     assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
34159     return nullptr;
34160 #endif  //FEATURE_64BIT_ALIGNMENT
34161 }
34162
34163 // Common code used by both variants of AllocAlign8 above.
34164 Object*
34165 GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
34166 {
34167 #ifdef FEATURE_64BIT_ALIGNMENT
34168     CONTRACTL {
34169         NOTHROW;
34170         GC_TRIGGERS;
34171     } CONTRACTL_END;
34172
34173     gc_heap* hp = (gc_heap*)_hp;
34174
34175     TRIGGERSGC();
34176
34177     Object* newAlloc = NULL;
34178
34179 #ifdef TRACE_GC
34180 #ifdef COUNT_CYCLES
34181     AllocStart = GetCycleCount32();
34182     unsigned finish;
34183 #elif defined(ENABLE_INSTRUMENTATION)
34184     unsigned AllocStart = GetInstLogTime();
34185     unsigned finish;
34186 #endif //COUNT_CYCLES
34187 #endif //TRACE_GC
34188
34189     if (size < LARGE_OBJECT_SIZE)
34190     {
34191 #ifdef TRACE_GC
34192         AllocSmallCount++;
34193 #endif //TRACE_GC
34194
34195         // Depending on where in the object the payload requiring 8-byte alignment resides we might have to
34196         // align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned
34197         // case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag.
34198         size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0;
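        // Worked example (illustrative, not part of the original comment): boxing an int64 on
        // a 32-bit platform places the 8-byte payload right after the 4-byte method table
        // pointer, so the caller passes GC_ALLOC_ALIGN8_BIAS and we want the header at an
        // address of the form 8n+4 (desiredAlignment == 4); without the bias the header itself
        // should land on an 8-byte boundary (desiredAlignment == 0).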
34199
34200         // Retrieve the address of the next allocation from the context (note that we're inside the alloc
34201         // lock at this point).
34202         uint8_t*  result = acontext->alloc_ptr;
34203
34204         // Will an allocation at this point yield the correct alignment and fit into the remainder of the
34205         // context?
34206         if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
34207         {
34208             // Yes, we can just go ahead and make the allocation.
34209             newAlloc = (Object*) hp->allocate (size, acontext);
34210             ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
34211         }
34212         else
34213         {
34214             // No, either the next available address is not aligned in the way we require it or there's
34215             // not enough space to allocate an object of the required size. In both cases we allocate a
34216             // padding object (marked as a free object). This object's size is such that it will reverse
34217             // the alignment of the next header (asserted below).
34218             //
34219             // We allocate both together then decide based on the result whether we'll format the space as
34220             // free object + real object or real object + free object.
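            // Layout sketch (illustrative, not part of the original comment): the single
            // allocation of Align(size) + Align(min_obj_size) bytes is formatted either as
            //   [real object][free object]  when the start already has the desired alignment, or
            //   [free object][real object]  otherwise -- since Align(min_obj_size) is 8n+4
            //   (asserted below), skipping over the free object flips a mis-aligned start to
            //   the desired alignment.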
34221             ASSERT((Align(min_obj_size) & 7) == 4);
34222             CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext);
34223             if (freeobj)
34224             {
34225                 if (((size_t)freeobj & 7) == desiredAlignment)
34226                 {
34227                     // New allocation has desired alignment, return this one and place the free object at the
34228                     // end of the allocated space.
34229                     newAlloc = (Object*)freeobj;
34230                     freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size));
34231                 }
34232                 else
34233                 {
34234                     // New allocation is still mis-aligned, format the initial space as a free object and the
34235                     // rest of the space should be correctly aligned for the real object.
34236                     newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
34237                     ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
34238                 }
34239                 freeobj->SetFree(min_obj_size);
34240             }
34241         }
34242     }
34243     else
34244     {
34245         // The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
34246         // support mis-aligned object headers so we can't support biased headers as above. Luckily for us
34247         // we've managed to arrange things so the only case where we see a bias is for boxed value types and
34248         // these can never get large enough to be allocated on the LOH.
34249         ASSERT(65536 < LARGE_OBJECT_SIZE);
34250         ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);
34251
34252         alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
34253
34254         newAlloc = (Object*) hp->allocate_large_object (size, acontext->alloc_bytes_loh);
34255         ASSERT(((size_t)newAlloc & 7) == 0);
34256     }
34257
34258     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34259
34260 #ifdef TRACE_GC
34261 #ifdef COUNT_CYCLES
34262     finish = GetCycleCount32();
34263 #elif defined(ENABLE_INSTRUMENTATION)
34264     finish = GetInstLogTime();
34265 #endif //COUNT_CYCLES
34266     AllocDuration += finish - AllocStart;
34267     AllocCount++;
34268 #endif //TRACE_GC
34269     return newAlloc;
34270 #else
34271     UNREFERENCED_PARAMETER(_hp);
34272     UNREFERENCED_PARAMETER(acontext);
34273     UNREFERENCED_PARAMETER(size);
34274     UNREFERENCED_PARAMETER(flags);
34275     assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
34276     return nullptr;
34277 #endif // FEATURE_64BIT_ALIGNMENT
34278 }
34279
34280 Object *
34281 GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
34282 {
34283     CONTRACTL {
34284         NOTHROW;
34285         GC_TRIGGERS;
34286     } CONTRACTL_END;
34287
34288     TRIGGERSGC();
34289
34290     Object* newAlloc = NULL;
34291
34292 #ifdef TRACE_GC
34293 #ifdef COUNT_CYCLES
34294     AllocStart = GetCycleCount32();
34295     unsigned finish;
34296 #elif defined(ENABLE_INSTRUMENTATION)
34297     unsigned AllocStart = GetInstLogTime();
34298     unsigned finish;
34299 #endif //COUNT_CYCLES
34300 #endif //TRACE_GC
34301
34302 #ifdef MULTIPLE_HEAPS
34303     //take the first heap....
34304     gc_heap* hp = gc_heap::g_heaps[0];
34305 #else
34306     gc_heap* hp = pGenGCHeap;
34307 #ifdef _PREFAST_
34308     // prefix complains about us dereferencing hp in wks build even though we only access static members
34309     // this way. not sure how to shut it up except for this ugly workaround:
34310     PREFIX_ASSUME(hp != NULL);
34311 #endif //_PREFAST_
34312 #endif //MULTIPLE_HEAPS
34313
34314     alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
34315
34316     newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
34317 #ifdef FEATURE_STRUCTALIGN
34318     newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
34319 #endif // FEATURE_STRUCTALIGN
34320     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34321
34322 #ifdef TRACE_GC
34323 #ifdef COUNT_CYCLES
34324     finish = GetCycleCount32();
34325 #elif defined(ENABLE_INSTRUMENTATION)
34326     finish = GetInstLogTime();
34327 #endif //COUNT_CYCLES
34328     AllocDuration += finish - AllocStart;
34329     AllocCount++;
34330 #endif //TRACE_GC
34331     return newAlloc;
34332 }
34333
34334 Object*
34335 GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
34336 {
34337     CONTRACTL {
34338         NOTHROW;
34339         GC_TRIGGERS;
34340     } CONTRACTL_END;
34341
34342     TRIGGERSGC();
34343
34344     Object* newAlloc = NULL;
34345     alloc_context* acontext = static_cast<alloc_context*>(context);
34346
34347 #ifdef TRACE_GC
34348 #ifdef COUNT_CYCLES
34349     AllocStart = GetCycleCount32();
34350     unsigned finish;
34351 #elif defined(ENABLE_INSTRUMENTATION)
34352     unsigned AllocStart = GetInstLogTime();
34353     unsigned finish;
34354 #endif //COUNT_CYCLES
34355 #endif //TRACE_GC
34356
34357 #ifdef MULTIPLE_HEAPS
34358     if (acontext->get_alloc_heap() == 0)
34359     {
34360         AssignHeap (acontext);
34361         assert (acontext->get_alloc_heap());
34362     }
34363 #endif //MULTIPLE_HEAPS
34364
34365 #ifdef MULTIPLE_HEAPS
34366     gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
34367 #else
34368     gc_heap* hp = pGenGCHeap;
34369 #ifdef _PREFAST_
34370     // prefix complains about us dereferencing hp in wks build even though we only access static members
34371     // this way. not sure how to shut it up except for this ugly workaround:
34372     PREFIX_ASSUME(hp != NULL);
34373 #endif //_PREFAST_
34374 #endif //MULTIPLE_HEAPS
34375
34376     if (size < LARGE_OBJECT_SIZE)
34377     {
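        // Small objects are served from the thread's allocation context on the small object heap;
        // anything at or above LARGE_OBJECT_SIZE takes the large object path in the else branch below.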
34378
34379 #ifdef TRACE_GC
34380         AllocSmallCount++;
34381 #endif //TRACE_GC
34382         newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext);
34383 #ifdef FEATURE_STRUCTALIGN
34384         newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
34385 #endif // FEATURE_STRUCTALIGN
34386 //        ASSERT (newAlloc);
34387     }
34388     else 
34389     {
34390         newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
34391 #ifdef FEATURE_STRUCTALIGN
34392         newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
34393 #endif // FEATURE_STRUCTALIGN
34394     }
34395
34396     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34397
34398 #ifdef TRACE_GC
34399 #ifdef COUNT_CYCLES
34400     finish = GetCycleCount32();
34401 #elif defined(ENABLE_INSTRUMENTATION)
34402     finish = GetInstLogTime();
34403 #endif //COUNT_CYCLES
34404     AllocDuration += finish - AllocStart;
34405     AllocCount++;
34406 #endif //TRACE_GC
34407     return newAlloc;
34408 }
34409
34410 void
34411 GCHeap::FixAllocContext (gc_alloc_context* context, bool lockp, void* arg, void *heap)
34412 {
34413     alloc_context* acontext = static_cast<alloc_context*>(context);
34414 #ifdef MULTIPLE_HEAPS
34415
34416     if (arg != 0)
34417         acontext->alloc_count = 0;
34418
34419     uint8_t * alloc_ptr = acontext->alloc_ptr;
34420
34421     if (!alloc_ptr)
34422         return;
34423
34424     // The acontext->alloc_heap can be out of sync with the ptrs because
34425     // of heap re-assignment in allocate
34426     gc_heap* hp = gc_heap::heap_of (alloc_ptr);
34427 #else
34428     gc_heap* hp = pGenGCHeap;
34429 #endif //MULTIPLE_HEAPS
34430
34431     if (heap == NULL || heap == hp)
34432     {
34433         if (lockp)
34434         {
34435             enter_spin_lock (&hp->more_space_lock);
34436         }
34437         hp->fix_allocation_context (acontext, ((arg != 0)? TRUE : FALSE),
34438                                 get_alignment_constant(TRUE));
34439         if (lockp)
34440         {
34441             leave_spin_lock (&hp->more_space_lock);
34442         }
34443     }
34444 }
34445
34446 Object*
34447 GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly)
34448 {
34449     uint8_t *o = (uint8_t*)pInteriorPtr;
34450
34451     gc_heap* hp = gc_heap::heap_of (o);
34452
34453     uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address);
34454     uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address);
34455
34456     if (o >= lowest && o < highest)
34457     {
34458         o = hp->find_object (o, lowest);
34459     }
34460     else
34461     {
34462         o = NULL;
34463     }
34464     
34465     return (Object *)o;
34466 }
34467
34468 BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p)
34469 {
34470     if (dd_new_allocation (dd) < 0)
34471     {
34472         return TRUE;
34473     }
34474
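    // Otherwise collect once the remaining budget has fallen below 30% of the desired allocation
    // (below 70% when under memory pressure, so low-memory requests trigger collections earlier).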
34475     if (((float)(dd_new_allocation (dd)) / (float)dd_desired_allocation (dd)) < (low_memory_p ? 0.7 : 0.3))
34476     {
34477         return TRUE;
34478     }
34479
34480     return FALSE;
34481 }
34482
34483 //----------------------------------------------------------------------------
34484 // #GarbageCollector
34485 //
34486 //  API to ensure that a complete new garbage collection takes place
34487 //
34488 HRESULT
34489 GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
34490 {
34491 #if defined(BIT64) 
34492     if (low_memory_p)
34493     {
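        // For an asynchronous low-memory request, skip the collection when too little has been
        // allocated since the last GC (judged against gc_heap::mem_one_percent) to make it worthwhile.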
34494         size_t total_allocated = 0;
34495         size_t total_desired = 0;
34496 #ifdef MULTIPLE_HEAPS
34497         int hn = 0;
34498         for (hn = 0; hn < gc_heap::n_heaps; hn++)
34499         {
34500             gc_heap* hp = gc_heap::g_heaps [hn];
34501             total_desired += dd_desired_allocation (hp->dynamic_data_of (0));
34502             total_allocated += dd_desired_allocation (hp->dynamic_data_of (0))-
34503                 dd_new_allocation (hp->dynamic_data_of (0));
34504         }
34505 #else
34506         gc_heap* hp = pGenGCHeap;
34507         total_desired = dd_desired_allocation (hp->dynamic_data_of (0));
34508         total_allocated = dd_desired_allocation (hp->dynamic_data_of (0))-
34509             dd_new_allocation (hp->dynamic_data_of (0));
34510 #endif //MULTIPLE_HEAPS
34511
34512         if ((total_desired > gc_heap::mem_one_percent) && (total_allocated < gc_heap::mem_one_percent))
34513         {
34514             dprintf (2, ("Async low mem but we've only allocated %d (< 10%% of physical mem) out of %d, returning",
34515                          total_allocated, total_desired));
34516
34517             return S_OK;
34518         }
34519     }
34520 #endif // BIT64 
34521
34522 #ifdef MULTIPLE_HEAPS
34523     gc_heap* hpt = gc_heap::g_heaps[0];
34524 #else
34525     gc_heap* hpt = 0;
34526 #endif //MULTIPLE_HEAPS
34527
34528     generation = (generation < 0) ? max_generation : min (generation, max_generation);
34529     dynamic_data* dd = hpt->dynamic_data_of (generation);
34530
34531 #ifdef BACKGROUND_GC
34532     if (recursive_gc_sync::background_running_p())
34533     {
34534         if ((mode == collection_optimized) || (mode & collection_non_blocking))
34535         {
34536             return S_OK;
34537         }
34538         if (mode & collection_blocking)
34539         {
34540             pGenGCHeap->background_gc_wait();
34541             if (mode & collection_optimized)
34542             {
34543                 return S_OK;
34544             }
34545         }
34546     }
34547 #endif //BACKGROUND_GC
34548
34549     if (mode & collection_optimized)
34550     {
34551         if (pGenGCHeap->gc_started)
34552         {
34553             return S_OK;
34554         }
34555         else 
34556         {
34557             BOOL should_collect = FALSE;
34558             BOOL should_check_loh = (generation == max_generation);
34559 #ifdef MULTIPLE_HEAPS
34560             for (int i = 0; i < gc_heap::n_heaps; i++)
34561             {
34562                 dynamic_data* dd1 = gc_heap::g_heaps [i]->dynamic_data_of (generation);
34563                 dynamic_data* dd2 = (should_check_loh ? 
34564                                      (gc_heap::g_heaps [i]->dynamic_data_of (max_generation + 1)) :
34565                                      0);
34566
34567                 if (should_collect_optimized (dd1, low_memory_p))
34568                 {
34569                     should_collect = TRUE;
34570                     break;
34571                 }
34572                 if (dd2 && should_collect_optimized (dd2, low_memory_p))
34573                 {
34574                     should_collect = TRUE;
34575                     break;
34576                 }
34577             }
34578 #else
34579             should_collect = should_collect_optimized (dd, low_memory_p);
34580             if (!should_collect && should_check_loh)
34581             {
34582                 should_collect = 
34583                     should_collect_optimized (hpt->dynamic_data_of (max_generation + 1), low_memory_p);
34584             }
34585 #endif //MULTIPLE_HEAPS
34586             if (!should_collect)
34587             {
34588                 return S_OK;
34589             }
34590         }
34591     }
34592
34593     size_t CollectionCountAtEntry = dd_collection_count (dd);
34594     size_t BlockingCollectionCountAtEntry = gc_heap::full_gc_counts[gc_type_blocking];
34595     size_t CurrentCollectionCount = 0;
34596
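    // Retry until this call has observably done its job: a blocking full GC request that only found
    // a background GC in flight waits for it and tries again, and any attempt where the collection
    // count did not change (no GC of the requested generation happened) is also retried.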
34597 retry:
34598
34599     CurrentCollectionCount = GarbageCollectTry(generation, low_memory_p, mode);
34600     
34601     if ((mode & collection_blocking) && 
34602         (generation == max_generation) && 
34603         (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry))
34604     {
34605 #ifdef BACKGROUND_GC
34606         if (recursive_gc_sync::background_running_p())
34607         {
34608             pGenGCHeap->background_gc_wait();
34609         }
34610 #endif //BACKGROUND_GC
34611
34612         goto retry;
34613     }
34614
34615     if (CollectionCountAtEntry == CurrentCollectionCount)
34616     {
34617         goto retry;
34618     }
34619
34620     return S_OK;
34621 }
34622
34623 size_t
34624 GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode)
34625 {
34626     int gen = (generation < 0) ? 
34627                max_generation : min (generation, max_generation);
34628
34629     gc_reason reason = reason_empty;
34630     
34631     if (low_memory_p) 
34632     {
34633         if (mode & collection_blocking)
34634             reason = reason_lowmemory_blocking;
34635         else
34636             reason = reason_lowmemory;
34637     }
34638     else
34639         reason = reason_induced;
34640
34641     if (reason == reason_induced)
34642     {
34643         if (mode & collection_compacting)
34644         {
34645             reason = reason_induced_compacting;
34646         }
34647         else if (mode & collection_non_blocking)
34648         {
34649             reason = reason_induced_noforce;
34650         }
34651 #ifdef STRESS_HEAP
34652         else if (mode & collection_gcstress)
34653         {
34654             reason = reason_gcstress;
34655         }
34656 #endif
34657     }
34658
34659     return GarbageCollectGeneration (gen, reason);
34660 }
34661
34662 void gc_heap::do_pre_gc()
34663 {
34664     STRESS_LOG_GC_STACK;
34665
34666 #ifdef STRESS_LOG
34667     STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index),
34668                         (uint32_t)settings.condemned_generation,
34669                         (uint32_t)settings.reason);
34670 #endif // STRESS_LOG
34671
34672 #ifdef MULTIPLE_HEAPS
34673     gc_heap* hp = g_heaps[0];
34674 #else
34675     gc_heap* hp = 0;
34676 #endif //MULTIPLE_HEAPS
34677
34678 #ifdef BACKGROUND_GC
34679     settings.b_state = hp->current_bgc_state;
34680 #endif //BACKGROUND_GC
34681
34682 #ifdef BACKGROUND_GC
34683     dprintf (1, ("*GC* %d(gen0:%d)(%d)(%s)(%d)", 
34684         VolatileLoad(&settings.gc_index), 
34685         dd_collection_count (hp->dynamic_data_of (0)),
34686         settings.condemned_generation,
34687         (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
34688         settings.b_state));
34689 #else
34690     dprintf (1, ("*GC* %d(gen0:%d)(%d)", 
34691         VolatileLoad(&settings.gc_index), 
34692         dd_collection_count(hp->dynamic_data_of(0)),
34693         settings.condemned_generation));
34694 #endif //BACKGROUND_GC
34695
34696     // TODO: this can happen...it's because of the way we are calling
34697     // do_pre_gc, will fix later.
34698     //if (last_gc_index > VolatileLoad(&settings.gc_index))
34699     //{
34700     //    FATAL_GC_ERROR();
34701     //}
34702
34703     last_gc_index = VolatileLoad(&settings.gc_index);
34704     GCHeap::UpdatePreGCCounters();
34705
34706     if (settings.concurrent)
34707     {
34708 #ifdef BACKGROUND_GC
34709         full_gc_counts[gc_type_background]++;
34710 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
34711         GCHeap::gc_stress_fgcs_in_bgc = 0;
34712 #endif // STRESS_HEAP && !FEATURE_REDHAWK
34713 #endif // BACKGROUND_GC
34714     }
34715     else
34716     {
34717         if (settings.condemned_generation == max_generation)
34718         {
34719             full_gc_counts[gc_type_blocking]++;
34720         }
34721         else
34722         {
34723 #ifdef BACKGROUND_GC
34724             if (settings.background_p)
34725             {
34726                 ephemeral_fgc_counts[settings.condemned_generation]++;
34727             }
34728 #endif //BACKGROUND_GC
34729         }
34730     }
34731
34732 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
34733     if (g_fEnableARM)
34734     {
34735         SystemDomain::ResetADSurvivedBytes();
34736     }
34737 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
34738 }
34739
34740 #ifdef GC_CONFIG_DRIVEN
34741 void gc_heap::record_interesting_info_per_heap()
34742 {
34743     // datapoints are always from the last blocking GC so don't record again
34744     // for BGCs.
34745     if (!(settings.concurrent))
34746     {
34747         for (int i = 0; i < max_idp_count; i++)
34748         {
34749             interesting_data_per_heap[i] += interesting_data_per_gc[i];
34750         }
34751     }
34752
34753     int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
34754     if (compact_reason >= 0)
34755         (compact_reasons_per_heap[compact_reason])++;
34756     int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
34757     if (expand_mechanism >= 0)
34758         (expand_mechanisms_per_heap[expand_mechanism])++;
34759
34760     for (int i = 0; i < max_gc_mechanism_bits_count; i++)
34761     {
34762         if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i))
34763             (interesting_mechanism_bits_per_heap[i])++;
34764     }
34765
34766     //         h#  | GC  | gen | C   | EX  | NF  | BF  | ML  | DM  || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
34767     cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |",
34768             heap_number,
34769             (size_t)settings.gc_index,
34770             settings.condemned_generation,
34771             // TEMP - I am just doing this for wks GC 'cause I wanna see the pattern of doing C/S GCs.
34772             (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction
34773             ((expand_mechanism >= 0)? "X" : ""), // EX
34774             ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF
34775             ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF
34776             (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML
34777             (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM
34778             interesting_data_per_gc[idp_pre_short],
34779             interesting_data_per_gc[idp_post_short],
34780             interesting_data_per_gc[idp_merged_pin],
34781             interesting_data_per_gc[idp_converted_pin],
34782             interesting_data_per_gc[idp_pre_pin],
34783             interesting_data_per_gc[idp_post_pin],
34784             interesting_data_per_gc[idp_pre_and_post_pin],
34785             interesting_data_per_gc[idp_pre_short_padded],
34786             interesting_data_per_gc[idp_post_short_padded]));
34787 }
34788
34789 void gc_heap::record_global_mechanisms()
34790 {
34791     for (int i = 0; i < max_global_mechanisms_count; i++)
34792     {
34793         if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i))
34794         {
34795             ::record_global_mechanism (i);
34796         }
34797     }
34798 }
34799
34800 BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p)
34801 {
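    // compact_ratio is the config-driven target percentage of compacting GCs. When it is 0 we honor
    // the natural decision; otherwise, once a few GCs have been recorded, we flip decisions that
    // would push the observed compact/sweep ratio past the configured target.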
34802     if (!compact_ratio)
34803         return (!compact_p);
34804
34805     size_t compact_count = compact_or_sweep_gcs[0];
34806     size_t sweep_count = compact_or_sweep_gcs[1];
34807
34808     size_t total_count = compact_count + sweep_count;
34809     BOOL should_compact = compact_p;
34810     if (total_count > 3)
34811     {
34812         if (compact_p)
34813         {
34814             int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1));
34815             if (temp_ratio > compact_ratio)
34816             {
34817                 // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n",
34818                 //     (compact_count + 1), (total_count + 1), temp_ratio));
34819                 should_compact = FALSE;
34820             }
34821         }
34822         else
34823         {
34824             int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1));
34825             if (temp_ratio > (100 - compact_ratio))
34826             {
34827                 // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n",
34828                 //     (sweep_count + 1), (total_count + 1), temp_ratio));
34829                 should_compact = TRUE;
34830             }
34831         }
34832     }
34833
34834     return !should_compact;
34835 }
34836 #endif //GC_CONFIG_DRIVEN
34837
34838 void gc_heap::do_post_gc()
34839 {
34840     if (!settings.concurrent)
34841     {
34842         initGCShadow();
34843     }
34844
34845 #ifdef TRACE_GC
34846 #ifdef COUNT_CYCLES
34847     AllocStart = GetCycleCount32();
34848 #else
34849     AllocStart = clock();
34850 #endif //COUNT_CYCLES
34851 #endif //TRACE_GC
34852
34853 #ifdef MULTIPLE_HEAPS
34854     gc_heap* hp = g_heaps[0];
34855 #else
34856     gc_heap* hp = 0;
34857 #endif //MULTIPLE_HEAPS
34858     
34859     GCToEEInterface::GcDone(settings.condemned_generation);
34860
34861     GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
34862                          (uint32_t)settings.condemned_generation,
34863                          (uint32_t)settings.reason,
34864                          !!settings.concurrent);
34865
34866     //dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)", 
34867     dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)", 
34868         VolatileLoad(&settings.gc_index), 
34869         dd_collection_count(hp->dynamic_data_of(0)),
34870         settings.condemned_generation,
34871         (settings.concurrent ? "BGC" : "GC")));
34872
34873     if (settings.exit_memory_load != 0)
34874         last_gc_memory_load = settings.exit_memory_load;
34875     else if (settings.entry_memory_load != 0)
34876         last_gc_memory_load = settings.entry_memory_load;
34877
34878     last_gc_heap_size = get_total_heap_size();
34879     last_gc_fragmentation = get_total_fragmentation();
34880
34881     GCHeap::UpdatePostGCCounters();
34882 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
34883     //if (g_fEnableARM)
34884     //{
34885     //    SystemDomain::GetADSurvivedBytes();
34886     //}
34887 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
34888
34889 #ifdef STRESS_LOG
34890     STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index),
34891                       (uint32_t)settings.condemned_generation,
34892                       (uint32_t)settings.reason);
34893 #endif // STRESS_LOG
34894
34895 #ifdef GC_CONFIG_DRIVEN
34896     if (!settings.concurrent)
34897     {
34898         if (settings.compaction)
34899             (compact_or_sweep_gcs[0])++;
34900         else
34901             (compact_or_sweep_gcs[1])++;
34902     }
34903
34904 #ifdef MULTIPLE_HEAPS
34905     for (int i = 0; i < n_heaps; i++)
34906         g_heaps[i]->record_interesting_info_per_heap();
34907 #else
34908     record_interesting_info_per_heap();
34909 #endif //MULTIPLE_HEAPS
34910     record_global_mechanisms();
34911 #endif //GC_CONFIG_DRIVEN
34912 }
34913
34914 unsigned GCHeap::GetGcCount()
34915 {
34916     return (unsigned int)VolatileLoad(&pGenGCHeap->settings.gc_index);
34917 }
34918
34919 size_t
34920 GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
34921 {
34922     dprintf (2, ("triggered a GC!"));
34923
34924 #ifdef MULTIPLE_HEAPS
34925     gc_heap* hpt = gc_heap::g_heaps[0];
34926 #else
34927     gc_heap* hpt = 0;
34928 #endif //MULTIPLE_HEAPS
34929     bool cooperative_mode = true;
34930     dynamic_data* dd = hpt->dynamic_data_of (gen);
34931     size_t localCount = dd_collection_count (dd);
34932
34933     enter_spin_lock (&gc_heap::gc_lock);
34934     dprintf (SPINLOCK_LOG, ("GC Egc"));
34935     ASSERT_HOLDING_SPIN_LOCK(&gc_heap::gc_lock);
34936
34937     //don't trigger another GC if one was already in progress
34938     //while waiting for the lock
34939     {
34940         size_t col_count = dd_collection_count (dd);
34941
34942         if (localCount != col_count)
34943         {
34944 #ifdef SYNCHRONIZATION_STATS
34945             gc_lock_contended++;
34946 #endif //SYNCHRONIZATION_STATS
34947             dprintf (SPINLOCK_LOG, ("no need GC Lgc"));
34948             leave_spin_lock (&gc_heap::gc_lock);
34949
34950             // We don't need to release msl here 'cause this means a GC
34951             // has happened and would have released all msl's.
34952             return col_count;
34953          }
34954     }
34955
34956 #ifdef COUNT_CYCLES
34957     int gc_start = GetCycleCount32();
34958 #endif //COUNT_CYCLES
34959
34960 #ifdef TRACE_GC
34961 #ifdef COUNT_CYCLES
34962     AllocDuration += GetCycleCount32() - AllocStart;
34963 #else
34964     AllocDuration += clock() - AllocStart;
34965 #endif //COUNT_CYCLES
34966 #endif //TRACE_GC
34967
34968     gc_heap::g_low_memory_status = (reason == reason_lowmemory) || 
34969                                    (reason == reason_lowmemory_blocking) ||
34970                                    (gc_heap::latency_level == latency_level_memory_footprint);
34971
34972     gc_trigger_reason = reason;
34973
34974 #ifdef MULTIPLE_HEAPS
34975     for (int i = 0; i < gc_heap::n_heaps; i++)
34976     {
34977         gc_heap::g_heaps[i]->reset_gc_done();
34978     }
34979 #else
34980     gc_heap::reset_gc_done();
34981 #endif //MULTIPLE_HEAPS
34982
34983     gc_heap::gc_started = TRUE;
34984
34985     {
34986         init_sync_log_stats();
34987
34988 #ifndef MULTIPLE_HEAPS
34989         cooperative_mode = gc_heap::enable_preemptive ();
34990
34991         dprintf (2, ("Suspending EE"));
34992         BEGIN_TIMING(suspend_ee_during_log);
34993         GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
34994         END_TIMING(suspend_ee_during_log);
34995         gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc();
34996         gc_heap::disable_preemptive (cooperative_mode);
34997         if (gc_heap::proceed_with_gc_p)
34998             pGenGCHeap->settings.init_mechanisms();
34999         else
35000             gc_heap::update_collection_counts_for_no_gc();
35001
35002 #endif //!MULTIPLE_HEAPS
35003     }
35004
35005 // MAP_EVENT_MONITORS(EE_MONITOR_GARBAGE_COLLECTIONS, NotifyEvent(EE_EVENT_TYPE_GC_STARTED, 0));
35006
35007 #ifdef TRACE_GC
35008 #ifdef COUNT_CYCLES
35009     unsigned start;
35010     unsigned finish;
35011     start = GetCycleCount32();
35012 #else
35013     clock_t start;
35014     clock_t finish;
35015     start = clock();
35016 #endif //COUNT_CYCLES
35017     PromotedObjectCount = 0;
35018 #endif //TRACE_GC
35019
35020     unsigned int condemned_generation_number = gen;
35021
35022     // We want to get a stack from the user thread that triggered the GC
35023     // instead of from the GC thread, which is what would otherwise happen for Server GC.
35024     // We do it for Workstation GC as well, to be uniform.
35025     FIRE_EVENT(GCTriggered, static_cast<uint32_t>(reason));
35026
35027 #ifdef MULTIPLE_HEAPS
35028     GcCondemnedGeneration = condemned_generation_number;
35029
35030     cooperative_mode = gc_heap::enable_preemptive ();
35031
35032     BEGIN_TIMING(gc_during_log);
35033     gc_heap::ee_suspend_event.Set();
35034     gc_heap::wait_for_gc_done();
35035     END_TIMING(gc_during_log);
35036
35037     gc_heap::disable_preemptive (cooperative_mode);
35038
35039     condemned_generation_number = GcCondemnedGeneration;
35040 #else
35041     if (gc_heap::proceed_with_gc_p)
35042     {
35043         BEGIN_TIMING(gc_during_log);
35044         pGenGCHeap->garbage_collect (condemned_generation_number);
35045         END_TIMING(gc_during_log);
35046     }
35047 #endif //MULTIPLE_HEAPS
35048
35049 #ifdef TRACE_GC
35050 #ifdef COUNT_CYCLES
35051     finish = GetCycleCount32();
35052 #else
35053     finish = clock();
35054 #endif //COUNT_CYCLES
35055     GcDuration += finish - start;
35056     dprintf (3,
35057              ("<GC# %d> Condemned: %d, Duration: %d, total: %d Alloc Avg: %d, Small Objects:%d Large Objects:%d",
35058               VolatileLoad(&pGenGCHeap->settings.gc_index), condemned_generation_number,
35059               finish - start, GcDuration,
35060               AllocCount ? (AllocDuration / AllocCount) : 0,
35061               AllocSmallCount, AllocBigCount));
35062     AllocCount = 0;
35063     AllocDuration = 0;
35064 #endif // TRACE_GC
35065
35066 #ifdef BACKGROUND_GC
35067     // We are deciding whether we should fire the alloc wait end event here
35068     // because in begin_foreground we could be calling end_foreground 
35069     // if we need to retry.
35070     if (gc_heap::alloc_wait_event_p)
35071     {
35072         hpt->fire_alloc_wait_event_end (awr_fgc_wait_for_bgc);
35073         gc_heap::alloc_wait_event_p = FALSE;
35074     }
35075 #endif //BACKGROUND_GC
35076
35077 #ifndef MULTIPLE_HEAPS
35078 #ifdef BACKGROUND_GC
35079     if (!gc_heap::dont_restart_ee_p)
35080     {
35081 #endif //BACKGROUND_GC
35082         BEGIN_TIMING(restart_ee_during_log);
35083         GCToEEInterface::RestartEE(TRUE);
35084         END_TIMING(restart_ee_during_log);
35085 #ifdef BACKGROUND_GC
35086     }
35087 #endif //BACKGROUND_GC
35088 #endif //!MULTIPLE_HEAPS
35089
35090 #ifdef COUNT_CYCLES
35091     printf ("GC: %d Time: %d\n", GcCondemnedGeneration,
35092             GetCycleCount32() - gc_start);
35093 #endif //COUNT_CYCLES
35094
35095 #ifndef MULTIPLE_HEAPS
35096     process_sync_log_stats();
35097     gc_heap::gc_started = FALSE;
35098     gc_heap::set_gc_done();
35099     dprintf (SPINLOCK_LOG, ("GC Lgc"));
35100     leave_spin_lock (&gc_heap::gc_lock);    
35101 #endif //!MULTIPLE_HEAPS
35102
35103 #ifdef FEATURE_PREMORTEM_FINALIZATION
35104     GCToEEInterface::EnableFinalization(!pGenGCHeap->settings.concurrent && pGenGCHeap->settings.found_finalizers);
35105 #endif // FEATURE_PREMORTEM_FINALIZATION
35106
35107     return dd_collection_count (dd);
35108 }
35109
35110 size_t      GCHeap::GetTotalBytesInUse ()
35111 {
35112 #ifdef MULTIPLE_HEAPS
35113     //enumerate all the heaps and get their size.
35114     size_t tot_size = 0;
35115     for (int i = 0; i < gc_heap::n_heaps; i++)
35116     {
35117         GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap;
35118         tot_size += Hp->ApproxTotalBytesInUse (FALSE);
35119     }
35120     return tot_size;
35121 #else
35122     return ApproxTotalBytesInUse ();
35123 #endif //MULTIPLE_HEAPS
35124 }
35125
35126 int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count)
35127 {
35128     if (get_bgc_fgc_count != 0)
35129     {
35130 #ifdef BACKGROUND_GC
35131         if (generation == max_generation)
35132         {
35133             return (int)(gc_heap::full_gc_counts[gc_type_background]);
35134         }
35135         else
35136         {
35137             return (int)(gc_heap::ephemeral_fgc_counts[generation]);
35138         }
35139 #else
35140         return 0;
35141 #endif //BACKGROUND_GC
35142     }
35143
35144 #ifdef MULTIPLE_HEAPS
35145     gc_heap* hp = gc_heap::g_heaps [0];
35146 #else  //MULTIPLE_HEAPS
35147     gc_heap* hp = pGenGCHeap;
35148 #endif //MULTIPLE_HEAPS
35149     if (generation > max_generation)
35150         return 0;
35151     else
35152         return (int)dd_collection_count (hp->dynamic_data_of (generation));
35153 }
35154
35155 size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
35156 {
35157     size_t totsize = 0;
35158     //GCTODO
35159     //ASSERT(InMustComplete());
35160     enter_spin_lock (&pGenGCHeap->gc_lock);
35161
35162     heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
35163     // Get small block heap size info
35164     totsize = (pGenGCHeap->alloc_allocated - heap_segment_mem (eph_seg));
35165     heap_segment* seg1 = generation_start_segment (pGenGCHeap->generation_of (max_generation));
35166     while (seg1 != eph_seg)
35167     {
35168         totsize += heap_segment_allocated (seg1) -
35169             heap_segment_mem (seg1);
35170         seg1 = heap_segment_next (seg1);
35171     }
35172
35173     //discount the fragmentation
35174     for (int i = 0; i <= max_generation; i++)
35175     {
35176         generation* gen = pGenGCHeap->generation_of (i);
35177         totsize -= (generation_free_list_space (gen) + generation_free_obj_space (gen));
35178     }
35179
35180     if (!small_heap_only)
35181     {
35182         heap_segment* seg2 = generation_start_segment (pGenGCHeap->generation_of (max_generation+1));
35183
35184         while (seg2 != 0)
35185         {
35186             totsize += heap_segment_allocated (seg2) -
35187                 heap_segment_mem (seg2);
35188             seg2 = heap_segment_next (seg2);
35189         }
35190
35191         //discount the fragmentation
35192         generation* loh_gen = pGenGCHeap->generation_of (max_generation+1);
35193         size_t frag = generation_free_list_space (loh_gen) + generation_free_obj_space (loh_gen);
35194         totsize -= frag;
35195     }
35196     leave_spin_lock (&pGenGCHeap->gc_lock);
35197     return totsize;
35198 }
35199
35200 #ifdef MULTIPLE_HEAPS
35201 void GCHeap::AssignHeap (alloc_context* acontext)
35202 {
35203     // Assign heap based on processor
35204     acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext, 0)));
35205     acontext->set_home_heap(acontext->get_alloc_heap());
35206 }
35207 GCHeap* GCHeap::GetHeap (int n)
35208 {
35209     assert (n < gc_heap::n_heaps);
35210     return gc_heap::g_heaps [n]->vm_heap;
35211 }
35212 #endif //MULTIPLE_HEAPS
35213
35214 bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number)
35215 {
35216     alloc_context* acontext = static_cast<alloc_context*>(context);
35217 #ifdef MULTIPLE_HEAPS
35218     return ((acontext->get_home_heap() == GetHeap(thread_number)) ||
35219             ((acontext->get_home_heap() == 0) && (thread_number == 0)));
35220 #else
35221     UNREFERENCED_PARAMETER(acontext);
35222     UNREFERENCED_PARAMETER(thread_number);
35223     return true;
35224 #endif //MULTIPLE_HEAPS
35225 }
35226
35227 // Returns the number of GC heaps currently in use.
35228 int GCHeap::GetNumberOfHeaps ()
35229 {
35230 #ifdef MULTIPLE_HEAPS
35231     return gc_heap::n_heaps;
35232 #else
35233     return 1;
35234 #endif //MULTIPLE_HEAPS
35235 }
35236
35237 /*
35238   In this way we spend extra time cycling through all the heaps while creating the handle;
35239   it ought to be changed by keeping alloc_context.home_heap as a number (equal to heap_number).
35240 */
35241 int GCHeap::GetHomeHeapNumber ()
35242 {
35243 #ifdef MULTIPLE_HEAPS
35244     Thread *pThread = GCToEEInterface::GetThread();
35245     for (int i = 0; i < gc_heap::n_heaps; i++)
35246     {
35247         if (pThread)
35248         {
35249             gc_alloc_context* ctx = GCToEEInterface::GetAllocContext();
35250             GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap();
35251             if (hp == gc_heap::g_heaps[i]->vm_heap) return i;
35252         }
35253     }
35254     return 0;
35255 #else
35256     return 0;
35257 #endif //MULTIPLE_HEAPS
35258 }
35259
35260 unsigned int GCHeap::GetCondemnedGeneration()
35261 {
35262     return gc_heap::settings.condemned_generation;
35263 }
35264
35265 void GCHeap::GetMemoryInfo(uint32_t* highMemLoadThreshold, 
35266                            uint64_t* totalPhysicalMem, 
35267                            uint32_t* lastRecordedMemLoad,
35268                            size_t* lastRecordedHeapSize,
35269                            size_t* lastRecordedFragmentation)
35270 {
35271     *highMemLoadThreshold = gc_heap::high_memory_load_th;
35272     *totalPhysicalMem = gc_heap::total_physical_mem;
35273     *lastRecordedMemLoad = gc_heap::last_gc_memory_load;
35274     *lastRecordedHeapSize = gc_heap::last_gc_heap_size;
35275     *lastRecordedFragmentation = gc_heap::last_gc_fragmentation;
35276 }
35277
35278 int GCHeap::GetGcLatencyMode()
35279 {
35280     return (int)(pGenGCHeap->settings.pause_mode);
35281 }
35282
35283 int GCHeap::SetGcLatencyMode (int newLatencyMode)
35284 {
35285     if (gc_heap::settings.pause_mode == pause_no_gc)
35286         return (int)set_pause_mode_no_gc;
35287
35288     gc_pause_mode new_mode = (gc_pause_mode)newLatencyMode;
35289
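    // pause_low_latency is only honored for workstation GC; pause_sustained_low_latency additionally
    // requires background GC support.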
35290     if (new_mode == pause_low_latency)
35291     {
35292 #ifndef MULTIPLE_HEAPS
35293         pGenGCHeap->settings.pause_mode = new_mode;
35294 #endif //!MULTIPLE_HEAPS
35295     }
35296     else if (new_mode == pause_sustained_low_latency)
35297     {
35298 #ifdef BACKGROUND_GC
35299         if (gc_heap::gc_can_use_concurrent)
35300         {
35301             pGenGCHeap->settings.pause_mode = new_mode;
35302         }
35303 #endif //BACKGROUND_GC
35304     }
35305     else
35306     {
35307         pGenGCHeap->settings.pause_mode = new_mode;
35308     }
35309
35310 #ifdef BACKGROUND_GC
35311     if (recursive_gc_sync::background_running_p())
35312     {
35313         // If we get here, it means we are doing an FGC. If the pause
35314         // mode was altered we will need to save it in the BGC settings.
35315         if (gc_heap::saved_bgc_settings.pause_mode != new_mode)
35316         {
35317             gc_heap::saved_bgc_settings.pause_mode = new_mode;
35318         }
35319     }
35320 #endif //BACKGROUND_GC
35321
35322     return (int)set_pause_mode_success;
35323 }
35324
35325 int GCHeap::GetLOHCompactionMode()
35326 {
35327     return pGenGCHeap->loh_compaction_mode;
35328 }
35329
35330 void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode)
35331 {
35332 #ifdef FEATURE_LOH_COMPACTION
35333     pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode;
35334 #endif //FEATURE_LOH_COMPACTION
35335 }
35336
35337 bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
35338                                            uint32_t lohPercentage)
35339 {
35340 #ifdef MULTIPLE_HEAPS
35341     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35342     {
35343         gc_heap* hp = gc_heap::g_heaps [hn];
35344         hp->fgn_last_alloc = dd_new_allocation (hp->dynamic_data_of (0));
35345     }
35346 #else //MULTIPLE_HEAPS
35347     pGenGCHeap->fgn_last_alloc = dd_new_allocation (pGenGCHeap->dynamic_data_of (0));
35348 #endif //MULTIPLE_HEAPS
35349
35350     pGenGCHeap->full_gc_approach_event.Reset();
35351     pGenGCHeap->full_gc_end_event.Reset();
35352     pGenGCHeap->full_gc_approach_event_set = false;
35353
35354     pGenGCHeap->fgn_maxgen_percent = gen2Percentage;
35355     pGenGCHeap->fgn_loh_percent = lohPercentage;
35356
35357     return TRUE;
35358 }
35359
35360 bool GCHeap::CancelFullGCNotification()
35361 {
35362     pGenGCHeap->fgn_maxgen_percent = 0;
35363     pGenGCHeap->fgn_loh_percent = 0;
35364
35365     pGenGCHeap->full_gc_approach_event.Set();
35366     pGenGCHeap->full_gc_end_event.Set();
35367     
35368     return TRUE;
35369 }
35370
35371 int GCHeap::WaitForFullGCApproach(int millisecondsTimeout)
35372 {
35373     dprintf (2, ("WFGA: Begin wait"));
35374     int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_approach_event), millisecondsTimeout);
35375     dprintf (2, ("WFGA: End wait"));
35376     return result;
35377 }
35378
35379 int GCHeap::WaitForFullGCComplete(int millisecondsTimeout)
35380 {
35381     dprintf (2, ("WFGE: Begin wait"));
35382     int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_end_event), millisecondsTimeout);
35383     dprintf (2, ("WFGE: End wait"));
35384     return result;
35385 }
35386
35387 int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC)
35388 {
35389     NoGCRegionLockHolder lh;
35390
35391     dprintf (1, ("begin no gc called"));
35392     start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC);
35393     if (status == start_no_gc_success)
35394     {
35395         GarbageCollect (max_generation);
35396         status = gc_heap::get_start_no_gc_region_status();
35397     }
35398
35399     if (status != start_no_gc_success)
35400         gc_heap::handle_failure_for_no_gc();
35401
35402     return (int)status;
35403 }
35404
35405 int GCHeap::EndNoGCRegion()
35406 {
35407     NoGCRegionLockHolder lh;
35408     return (int)gc_heap::end_no_gc_region();
35409 }
35410
35411 void GCHeap::PublishObject (uint8_t* Obj)
35412 {
35413 #ifdef BACKGROUND_GC
35414     gc_heap* hp = gc_heap::heap_of (Obj);
35415     hp->bgc_alloc_lock->loh_alloc_done (Obj);
35416 #endif //BACKGROUND_GC
35417 }
35418
35419 // The spec for this one isn't clear. This function
35420 // returns the size that can be allocated without
35421 // triggering a GC of any kind.
35422 size_t GCHeap::ApproxFreeBytes()
35423 {
35424     //GCTODO
35425     //ASSERT(InMustComplete());
35426     enter_spin_lock (&pGenGCHeap->gc_lock);
35427
35428     generation* gen = pGenGCHeap->generation_of (0);
35429     size_t res = generation_allocation_limit (gen) - generation_allocation_pointer (gen);
35430
35431     leave_spin_lock (&pGenGCHeap->gc_lock);
35432
35433     return res;
35434 }
35435
35436 HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
35437 {
35438     if ((gen < 0) || (gen > max_generation))
35439         return E_FAIL;
35440 #ifdef MULTIPLE_HEAPS
35441     counters->current_size = 0;
35442     counters->promoted_size = 0;
35443     counters->collection_count = 0;
35444
35445     //enumerate all the heaps and get their counters.
35446     for (int i = 0; i < gc_heap::n_heaps; i++)
35447     {
35448         dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen);
35449
35450         counters->current_size += dd_current_size (dd);
35451         counters->promoted_size += dd_promoted_size (dd);
35452         if (i == 0)
35453             counters->collection_count += dd_collection_count (dd);
35454     }
35455 #else
35456     dynamic_data* dd = pGenGCHeap->dynamic_data_of (gen);
35457     counters->current_size = dd_current_size (dd);
35458     counters->promoted_size = dd_promoted_size (dd);
35459     counters->collection_count = dd_collection_count (dd);
35460 #endif //MULTIPLE_HEAPS
35461     return S_OK;
35462 }
35463
35464 // Get the segment size to use, making sure it conforms.
35465 size_t GCHeap::GetValidSegmentSize(bool large_seg)
35466 {
35467     return get_valid_segment_size (large_seg);
35468 }
35469
35470 // Get the max gen0 heap size, making sure it conforms.
35471 size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
35472 {
35473     size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
35474
35475     if ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size))
35476     {
35477 #ifdef SERVER_GC
35478         // performance data seems to indicate halving the size results
35479         // in optimal perf.  Ask for adjusted gen0 size.
35480         gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024));
35481
35482         // if gen0 size is too large given the available memory, reduce it.
35483         // Get true cache size, as we don't want to reduce below this.
35484         size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024));
35485         dprintf (2, ("cache: %Id-%Id, cpu: %Id", 
35486             GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),
35487             GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)));
35488
35489         int n_heaps = gc_heap::n_heaps;
35490 #else //SERVER_GC
35491         size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE);
35492         gen0size = max((4*trueSize/5),(256*1024));
35493         trueSize = max(trueSize, (256*1024));
35494         int n_heaps = 1;
35495 #endif //SERVER_GC
35496
35497         // if the total min GC across heaps will exceed 1/6th of available memory,
35498         // then reduce the min GC size until it either fits or has been reduced to cache size.
35499         while ((gen0size * n_heaps) > GCToOSInterface::GetPhysicalMemoryLimit() / 6)
35500         {
35501             gen0size = gen0size / 2;
35502             if (gen0size <= trueSize)
35503             {
35504                 gen0size = trueSize;
35505                 break;
35506             }
35507         }
35508     }
35509
35510     // Generation 0 must never be more than 1/2 the segment size.
35511     if (gen0size >= (seg_size / 2))
35512         gen0size = seg_size / 2;
35513
35514     return (gen0size);
35515 }
35516
35517 void GCHeap::SetReservedVMLimit (size_t vmlimit)
35518 {
35519     gc_heap::reserved_memory_limit = vmlimit;
35520 }
35521
35522
35523 //versions of the same method on each heap
35524
35525 #ifdef FEATURE_PREMORTEM_FINALIZATION
35526
35527 Object* GCHeap::GetNextFinalizableObject()
35528 {
35529
35530 #ifdef MULTIPLE_HEAPS
35531
35532     //return the first non-critical one in the first queue.
35533     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35534     {
35535         gc_heap* hp = gc_heap::g_heaps [hn];
35536         Object* O = hp->finalize_queue->GetNextFinalizableObject(TRUE);
35537         if (O)
35538             return O;
35539     }
35540     //otherwise return the first one, critical or non-critical, in the first queue.
35541     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35542     {
35543         gc_heap* hp = gc_heap::g_heaps [hn];
35544         Object* O = hp->finalize_queue->GetNextFinalizableObject(FALSE);
35545         if (O)
35546             return O;
35547     }
35548     return 0;
35549
35550
35551 #else //MULTIPLE_HEAPS
35552     return pGenGCHeap->finalize_queue->GetNextFinalizableObject();
35553 #endif //MULTIPLE_HEAPS
35554
35555 }
35556
35557 size_t GCHeap::GetNumberFinalizableObjects()
35558 {
35559 #ifdef MULTIPLE_HEAPS
35560     size_t cnt = 0;
35561     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35562     {
35563         gc_heap* hp = gc_heap::g_heaps [hn];
35564         cnt += hp->finalize_queue->GetNumberFinalizableObjects();
35565     }
35566     return cnt;
35567
35568
35569 #else //MULTIPLE_HEAPS
35570     return pGenGCHeap->finalize_queue->GetNumberFinalizableObjects();
35571 #endif //MULTIPLE_HEAPS
35572 }
35573
35574 size_t GCHeap::GetFinalizablePromotedCount()
35575 {
35576 #ifdef MULTIPLE_HEAPS
35577     size_t cnt = 0;
35578
35579     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35580     {
35581         gc_heap* hp = gc_heap::g_heaps [hn];
35582         cnt += hp->finalize_queue->GetPromotedCount();
35583     }
35584     return cnt;
35585
35586 #else //MULTIPLE_HEAPS
35587     return pGenGCHeap->finalize_queue->GetPromotedCount();
35588 #endif //MULTIPLE_HEAPS
35589 }
35590
35591 bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
35592 {
35593 #ifdef MULTIPLE_HEAPS
35594     bool foundp = false;
35595     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35596     {
35597         gc_heap* hp = gc_heap::g_heaps [hn];
35598         if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers))
35599             foundp = true;
35600     }
35601     return foundp;
35602
35603 #else //MULTIPLE_HEAPS
35604     return pGenGCHeap->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers);
35605 #endif //MULTIPLE_HEAPS
35606 }
35607
35608 bool GCHeap::ShouldRestartFinalizerWatchDog()
35609 {
35610     // This check was historically used as part of the condition to detect finalizer thread timeouts.
35611     return gc_heap::gc_lock.lock != -1;
35612 }
35613
35614 void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock)
35615 {
35616 #ifdef MULTIPLE_HEAPS
35617     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
35618     {
35619         gc_heap* hp = gc_heap::g_heaps [hn];
35620         hp->finalize_queue->SetSegForShutDown(fHasLock);
35621     }
35622
35623 #else //MULTIPLE_HEAPS
35624     pGenGCHeap->finalize_queue->SetSegForShutDown(fHasLock);
35625 #endif //MULTIPLE_HEAPS
35626 }
35627
35628 //---------------------------------------------------------------------------
35629 // Finalized class tracking
35630 //---------------------------------------------------------------------------
35631
35632 bool GCHeap::RegisterForFinalization (int gen, Object* obj)
35633 {
35634     if (gen == -1)
35635         gen = 0;
35636     if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
35637     {
35638         //just reset the bit
35639         ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN);
35640         return true;
35641     }
35642     else
35643     {
35644         gc_heap* hp = gc_heap::heap_of ((uint8_t*)obj);
35645         return hp->finalize_queue->RegisterForFinalization (gen, obj);
35646     }
35647 }
35648
35649 void GCHeap::SetFinalizationRun (Object* obj)
35650 {
35651     ((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN);
35652 }
35653
35654
35655 //--------------------------------------------------------------------
35656 //
35657 //          Support for finalization
35658 //
35659 //--------------------------------------------------------------------
35660
35661 inline
35662 unsigned int gen_segment (int gen)
35663 {
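    // Queue segments are laid out oldest generation first, so generation N maps to
    // slot NUMBERGENERATIONS - N - 1.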
35664     assert (((signed)NUMBERGENERATIONS - gen - 1)>=0);
35665     return (NUMBERGENERATIONS - gen - 1);
35666 }
35667
35668 bool CFinalize::Initialize()
35669 {
35670     CONTRACTL {
35671         NOTHROW;
35672         GC_NOTRIGGER;
35673     } CONTRACTL_END;
35674
35675     m_Array = new (nothrow)(Object*[100]);
35676
35677     if (!m_Array)
35678     {
35679         ASSERT (m_Array);
35680         STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
35681         if (GCConfig::GetBreakOnOOM())
35682         {
35683             GCToOSInterface::DebugBreak();
35684         }
35685         return false;
35686     }
35687     m_EndArray = &m_Array[100];
35688
35689     for (int i = 0; i < FreeList; i++)
35690     {
35691         SegQueueLimit (i) = m_Array;
35692     }
35693     m_PromotedCount = 0;
35694     lock = -1;
35695 #ifdef _DEBUG
35696     lockowner_threadid.Clear();
35697 #endif // _DEBUG
35698
35699     return true;
35700 }
35701
35702 CFinalize::~CFinalize()
35703 {
35704     delete [] m_Array;
35705 }
35706
35707 size_t CFinalize::GetPromotedCount ()
35708 {
35709     return m_PromotedCount;
35710 }
35711
35712 inline
35713 void CFinalize::EnterFinalizeLock()
35714 {
35715     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
35716              GCToEEInterface::GetThread() == 0 ||
35717              GCToEEInterface::IsPreemptiveGCDisabled());
35718
35719 retry:
35720     if (Interlocked::CompareExchange(&lock, 0, -1) >= 0)
35721     {
35722         unsigned int i = 0;
35723         while (lock >= 0)
35724         {
35725             YieldProcessor();           // indicate to the processor that we are spinning
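            // back off: yield the thread on most iterations and sleep briefly on every 8th spin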
35726             if (++i & 7)
35727                 GCToOSInterface::YieldThread (0);
35728             else
35729                 GCToOSInterface::Sleep (5);
35730         }
35731         goto retry;
35732     }
35733
35734 #ifdef _DEBUG
35735     lockowner_threadid.SetToCurrentThread();
35736 #endif // _DEBUG
35737 }
35738
35739 inline
35740 void CFinalize::LeaveFinalizeLock()
35741 {
35742     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
35743              GCToEEInterface::GetThread() == 0 ||
35744              GCToEEInterface::IsPreemptiveGCDisabled());
35745
35746 #ifdef _DEBUG
35747     lockowner_threadid.Clear();
35748 #endif // _DEBUG
35749     lock = -1;
35750 }
35751
35752 bool
35753 CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
35754 {
35755     CONTRACTL {
35756         NOTHROW;
35757         GC_NOTRIGGER;
35758     } CONTRACTL_END;
35759
35760     EnterFinalizeLock();
35761     // Adjust gen
35762     unsigned int dest = 0;
35763
35764     if (g_fFinalizerRunOnShutDown)
35765     {
35766         //no method table available yet,
35767         //put it in the finalizer queue and sort it out when
35768         //dequeueing
35769         dest = FinalizerListSeg;
35770     }
35771
35772     else
35773         dest = gen_segment (gen);
35774
35775     // Adjust boundary for segments so that GC will keep objects alive.
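    // The queue is a single array partitioned into segments by fill pointers, so making room in the
    // destination segment only shifts one boundary element of each intervening segment rather than
    // sliding the whole array.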
35776     Object*** s_i = &SegQueue (FreeList);
35777     if ((*s_i) == m_EndArray)
35778     {
35779         if (!GrowArray())
35780         {
35781             LeaveFinalizeLock();
35782             if (method_table(obj) == NULL)
35783             {
35784                 // If the object is uninitialized, a valid size should have been passed.
35785                 assert (size >= Align (min_obj_size));
35786                 dprintf (3, ("Making unused array [%Ix, %Ix[", (size_t)obj, (size_t)(obj+size)));
35787                 ((CObjectHeader*)obj)->SetFree(size);
35788             }
35789             STRESS_LOG_OOM_STACK(0);
35790             if (GCConfig::GetBreakOnOOM())
35791             {
35792                 GCToOSInterface::DebugBreak();
35793             }
35794             return false;
35795         }
35796     }
35797     Object*** end_si = &SegQueueLimit (dest);
35798     do
35799     {
35800         //is the segment empty?
35801         if (!(*s_i == *(s_i-1)))
35802         {
35803             //no, swap the end elements.
35804             *(*s_i) = *(*(s_i-1));
35805         }
35806         //increment the fill pointer
35807         (*s_i)++;
35808         //go to the next segment.
35809         s_i--;
35810     } while (s_i > end_si);
35811
35812     // We have reached the destination segment
35813     // store the object
35814     **s_i = obj;
35815     // increment the fill pointer
35816     (*s_i)++;
35817
35818     LeaveFinalizeLock();
35819
35820     return true;
35821 }
35822
35823 Object*
35824 CFinalize::GetNextFinalizableObject (BOOL only_non_critical)
35825 {
35826     Object* obj = 0;
35827     //serialize
35828     EnterFinalizeLock();
35829
35830 retry:
35831     if (!IsSegEmpty(FinalizerListSeg))
35832     {
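        // During shutdown, pop from the back of FinalizerListSeg; a critical finalizer found there is
        // migrated to CriticalFinalizerListSeg and we retry, so non-critical finalizers run first.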
35833         if (g_fFinalizerRunOnShutDown)
35834         {
35835             obj = *(SegQueueLimit (FinalizerListSeg)-1);
35836             if (method_table(obj)->HasCriticalFinalizer())
35837             {
35838                 MoveItem ((SegQueueLimit (FinalizerListSeg)-1),
35839                           FinalizerListSeg, CriticalFinalizerListSeg);
35840                 goto retry;
35841             }
35842             else
35843                 --SegQueueLimit (FinalizerListSeg);
35844         }
35845         else
35846             obj =  *(--SegQueueLimit (FinalizerListSeg));
35847
35848     }
35849     else if (!only_non_critical && !IsSegEmpty(CriticalFinalizerListSeg))
35850     {
35851         //the FinalizerList is empty, we can adjust both
35852         // limits instead of moving the object to the free list
35853         obj =  *(--SegQueueLimit (CriticalFinalizerListSeg));
35854         --SegQueueLimit (FinalizerListSeg);
35855     }
35856     if (obj)
35857     {
35858         dprintf (3, ("running finalizer for %Ix (mt: %Ix)", obj, method_table (obj)));
35859     }
35860     LeaveFinalizeLock();
35861     return obj;
35862 }
35863
35864 void
35865 CFinalize::SetSegForShutDown(BOOL fHasLock)
35866 {
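    // At shutdown, move every registered object out of its generation segment into the finalizer
    // run lists, keeping critical finalizers on their own list.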
35867     int i;
35868
35869     if (!fHasLock)
35870         EnterFinalizeLock();
35871     for (i = 0; i <= max_generation; i++)
35872     {
35873         unsigned int seg = gen_segment (i);
35874         Object** startIndex = SegQueueLimit (seg)-1;
35875         Object** stopIndex  = SegQueue (seg);
35876         for (Object** po = startIndex; po >= stopIndex; po--)
35877         {
35878             Object* obj = *po;
35879             if (method_table(obj)->HasCriticalFinalizer())
35880             {
35881                 MoveItem (po, seg, CriticalFinalizerListSeg);
35882             }
35883             else
35884             {
35885                 MoveItem (po, seg, FinalizerListSeg);
35886             }
35887         }
35888     }
35889     if (!fHasLock)
35890         LeaveFinalizeLock();
35891 }
35892
35893 void
35894 CFinalize::DiscardNonCriticalObjects()
35895 {
35896     //empty the finalization queue
35897     Object** startIndex = SegQueueLimit (FinalizerListSeg)-1;
35898     Object** stopIndex  = SegQueue (FinalizerListSeg);
35899     for (Object** po = startIndex; po >= stopIndex; po--)
35900     {
35901         MoveItem (po, FinalizerListSeg, FreeList);
35902     }
35903 }
35904
35905 size_t
35906 CFinalize::GetNumberFinalizableObjects()
35907 {
35908     return SegQueueLimit (FinalizerListSeg) -
35909         (g_fFinalizerRunOnShutDown ? m_Array : SegQueue(FinalizerListSeg));
35910 }
35911
35912 BOOL
35913 CFinalize::FinalizeSegForAppDomain (void *pDomain, 
35914                                     BOOL fRunFinalizers, 
35915                                     unsigned int Seg)
35916 {
35917     BOOL finalizedFound = FALSE;
35918     Object** endIndex = SegQueue (Seg);
35919     for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
35920     {
35921         CObjectHeader* obj = (CObjectHeader*)*i;
35922
35923         // Objects are put into the finalization queue before they are complete (ie their methodtable
35924         // may be null) so we must check that the object we found has a method table before checking
35925         // if it has the index we are looking for. If the methodtable is null, it can't be from the
35926         // unloading domain, so skip it.
35927         if (method_table(obj) == NULL)
35928         {
35929             continue;
35930         }
35931
35932         // does the EE actually want us to finalize this object?
35933         if (!GCToEEInterface::ShouldFinalizeObjectForUnload(pDomain, obj))
35934         {
35935             continue;
35936         }
35937
35938         if (!fRunFinalizers || (obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
35939         {
35940             //remove the object because we don't want to
35941             //run the finalizer
35942             MoveItem (i, Seg, FreeList);
35943             //Reset the bit so it will be put back on the queue
35944             //if resurrected and re-registered.
35945             obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
35946         }
35947         else
35948         {
35949             if (method_table(obj)->HasCriticalFinalizer())
35950             {
35951                 finalizedFound = TRUE;
35952                 MoveItem (i, Seg, CriticalFinalizerListSeg);
35953             }
35954             else
35955             {
35956                 if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
35957                 {
35958                     MoveItem (i, Seg, FreeList);
35959                 }
35960                 else
35961                 {
35962                     finalizedFound = TRUE;
35963                     MoveItem (i, Seg, FinalizerListSeg);
35964                 }
35965             }
35966         }
35967     }
35968
35969     return finalizedFound;
35970 }
35971
35972 bool
35973 CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
35974 {
35975     bool finalizedFound = false;
35976
35977     unsigned int startSeg = gen_segment (max_generation);
35978
35979     EnterFinalizeLock();
35980
35981     for (unsigned int Seg = startSeg; Seg <= gen_segment (0); Seg++)
35982     {
35983         if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg))
35984         {
35985             finalizedFound = true;
35986         }
35987     }
35988
35989     LeaveFinalizeLock();
35990
35991     return finalizedFound;
35992 }
35993
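// Moves a single entry between segments of the contiguous m_Array without shifting whole
// segments: for every segment boundary between fromSeg and toSeg the entry is swapped with the
// element sitting at that boundary and the boundary (fill pointer) is bumped by one slot, so the
// cost is proportional to the number of segments crossed rather than their sizes. Note that the
// order of entries within a segment is not preserved.
//
// Rough illustration, moving x forward from segment A to segment C (| marks fill pointers):
//     [a0 x a2 | b0 b1 | c0]  ->  [a0 a2 | x b0 b1 | c0]  ->  [a0 a2 | b1 b0 | x c0]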
35994 void
35995 CFinalize::MoveItem (Object** fromIndex,
35996                      unsigned int fromSeg,
35997                      unsigned int toSeg)
35998 {
35999
36000     int step;
36001     ASSERT (fromSeg != toSeg);
36002     if (fromSeg > toSeg)
36003         step = -1;
36004     else
36005         step = +1;
36006     // Place the element at the boundary closest to dest
36007     Object** srcIndex = fromIndex;
36008     for (unsigned int i = fromSeg; i != toSeg; i+= step)
36009     {
36010         Object**& destFill = m_FillPointers[i+(step - 1 )/2];
36011         Object** destIndex = destFill - (step + 1)/2;
36012         if (srcIndex != destIndex)
36013         {
36014             Object* tmp = *srcIndex;
36015             *srcIndex = *destIndex;
36016             *destIndex = tmp;
36017         }
36018         destFill -= step;
36019         srcIndex = destIndex;
36020     }
36021 }
36022
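// Reports every entry on the f-reachable lists to the promote callback. CriticalFinalizerListSeg
// and FinalizerListSeg are adjacent in m_Array, so one pass from SegQueue (CriticalFinalizerListSeg)
// to SegQueueLimit (FinalizerListSeg) covers both the critical and the regular finalizable
// objects that are waiting for their finalizers to run.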
36023 void
36024 CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
36025 {
36026     ScanContext sc;
36027     if (pSC == 0)
36028         pSC = &sc;
36029
36030     pSC->thread_number = hn;
36031
36032     //scan the finalization queue
36033     Object** startIndex  = SegQueue (CriticalFinalizerListSeg);
36034     Object** stopIndex  = SegQueueLimit (FinalizerListSeg);
36035
36036     for (Object** po = startIndex; po < stopIndex; po++)
36037     {
36038         Object* o = *po;
36039         //dprintf (3, ("scan freacheable %Ix", (size_t)o));
36040         dprintf (3, ("scan f %Ix", (size_t)o));
36041 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
36042         if (g_fEnableARM)
36043         {
36044             pSC->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(o->GetAppDomainIndex());
36045         }
36046 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
36047
36048         (*fn)(po, pSC, 0);
36049     }
36050 }
36051
36052 void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
36053 {
36054     Object** startIndex = SegQueue (CriticalFinalizerListSeg);
36055     Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
36056     Object** stopIndex  = SegQueueLimit (FinalizerListSeg);
36057     for (Object** po = startIndex; po < stopIndex; po++)
36058     {
36059         //report *po; the first argument says whether the entry is on the critical finalizer list
36060         fn(po < stopCriticalIndex, *po);
36061     }
36062 }
36063
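// Called after marking. Walks the registration queues of generation 'gen' and all younger
// generations; an object the GC did not mark is now reachable only through finalization:
//  - if the EE finalizes it eagerly, or its finalizer was suppressed (BIT_SBLK_FINALIZER_RUN),
//    the entry is simply dropped onto the free list;
//  - otherwise the entry moves to FinalizerListSeg / CriticalFinalizerListSeg and is then
//    promoted via GcScanRoots so the object (and everything it references) survives this GC.
// Returns TRUE if anything became f-reachable; for concurrent (background) GCs it also asks the
// EE to enable finalization, unless this is a mark-only scan.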
36064 BOOL
36065 CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
36066                                 gc_heap* hp)
36067 {
36068     ScanContext sc;
36069     sc.promotion = TRUE;
36070 #ifdef MULTIPLE_HEAPS
36071     sc.thread_number = hp->heap_number;
36072 #else
36073     UNREFERENCED_PARAMETER(hp);
36074 #endif //MULTIPLE_HEAPS
36075
36076     BOOL finalizedFound = FALSE;
36077
36078     //start with gen and explore all the younger generations.
36079     unsigned int startSeg = gen_segment (gen);
36080     {
36081         m_PromotedCount = 0;
36082         for (unsigned int Seg = startSeg; Seg <= gen_segment(0); Seg++)
36083         {
36084             Object** endIndex = SegQueue (Seg);
36085             for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
36086             {
36087                 CObjectHeader* obj = (CObjectHeader*)*i;
36088                 dprintf (3, ("scanning: %Ix", (size_t)obj));
36089                 if (!g_theGCHeap->IsPromoted (obj))
36090                 {
36091                     dprintf (3, ("freacheable: %Ix", (size_t)obj));
36092
36093                     assert (method_table(obj)->HasFinalizer());
36094
36095                     if (GCToEEInterface::EagerFinalized(obj))
36096                     {
36097                         MoveItem (i, Seg, FreeList);
36098                     }
36099                     else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
36100                     {
36101                         //remove the object because we don't want to
36102                         //run the finalizer
36103
36104                         MoveItem (i, Seg, FreeList);
36105
36106                         //Reset the bit so it will be put back on the queue
36107                         //if resurrected and re-registered.
36108                         obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
36109
36110                     }
36111                     else
36112                     {
36113                         m_PromotedCount++;
36114
36115                         if (method_table(obj)->HasCriticalFinalizer())
36116                         {
36117                             MoveItem (i, Seg, CriticalFinalizerListSeg);
36118                         }
36119                         else
36120                         {
36121                             MoveItem (i, Seg, FinalizerListSeg);
36122                         }
36123                     }
36124                 }
36125 #ifdef BACKGROUND_GC
36126                 else
36127                 {
36128                     if ((gen == max_generation) && (recursive_gc_sync::background_running_p()))
36129                     {
36130                         // TODO - fix the following line.
36131                         //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE));
36132                         dprintf (3, ("%Ix is marked", (size_t)obj));
36133                     }
36134                 }
36135 #endif //BACKGROUND_GC
36136             }
36137         }
36138     }
36139     finalizedFound = !IsSegEmpty(FinalizerListSeg) ||
36140                      !IsSegEmpty(CriticalFinalizerListSeg);
36141                     
36142     if (finalizedFound)
36143     {
36144         //Promote the f-reachable objects
36145         GcScanRoots (pfn,
36146 #ifdef MULTIPLE_HEAPS
36147                      hp->heap_number
36148 #else
36149                      0
36150 #endif //MULTIPLE_HEAPS
36151                      , 0);
36152
36153         hp->settings.found_finalizers = TRUE;
36154
36155 #ifdef BACKGROUND_GC
36156         if (hp->settings.concurrent)
36157         {
36158             hp->settings.found_finalizers = !(IsSegEmpty(FinalizerListSeg) && IsSegEmpty(CriticalFinalizerListSeg));
36159         }
36160 #endif //BACKGROUND_GC
36161         if (hp->settings.concurrent && hp->settings.found_finalizers)
36162         {
36163             if (!mark_only_p)
36164                 GCToEEInterface::EnableFinalization(true);
36165         }
36166     }
36167
36168     return finalizedFound;
36169 }
36170
36171 //Relocates all of the objects in the finalization array
36172 void
36173 CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
36174 {
36175     ScanContext sc;
36176     sc.promotion = FALSE;
36177 #ifdef MULTIPLE_HEAPS
36178     sc.thread_number = hp->heap_number;
36179 #else
36180     UNREFERENCED_PARAMETER(hp);
36181 #endif //MULTIPLE_HEAPS
36182
36183     unsigned int Seg = gen_segment (gen);
36184
36185     Object** startIndex = SegQueue (Seg);
36186     for (Object** po = startIndex; po < SegQueue (FreeList);po++)
36187     {
36188         GCHeap::Relocate (po, &sc);
36189     }
36190 }
36191
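// After plan/relocate, re-files finalization entries under the generation their object now
// belongs to. If gen 0 was emptied (everything promoted wholesale) the fix-up is just a shift of
// the per-generation fill pointers; otherwise each entry is tested with WhichGeneration and moved
// individually. After a demotion the loop steps back one slot (po--) because MoveItem swapped a
// not-yet-examined entry into the current position.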
36192 void
36193 CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
36194 {
36195     // Update the per-generation fill pointers.
36196     // If gen_0_empty_p is FALSE, test each object to find out whether
36197     // it was promoted or not.
36198     if (gen_0_empty_p)
36199     {
36200         for (int i = min (gen+1, max_generation); i > 0; i--)
36201         {
36202             m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)];
36203         }
36204     }
36205     else
36206     {
36207         //Look for demoted or promoted plugs
36208
36209         for (int i = gen; i >= 0; i--)
36210         {
36211             unsigned int Seg = gen_segment (i);
36212             Object** startIndex = SegQueue (Seg);
36213
36214             for (Object** po = startIndex;
36215                  po < SegQueueLimit (gen_segment(i)); po++)
36216             {
36217                 int new_gen = g_theGCHeap->WhichGeneration (*po);
36218                 if (new_gen != i)
36219                 {
36220                     if (new_gen > i)
36221                     {
36222                         //promotion
36223                         MoveItem (po, gen_segment (i), gen_segment (new_gen));
36224                     }
36225                     else
36226                     {
36227                         //demotion
36228                         MoveItem (po, gen_segment (i), gen_segment (new_gen));
36229                         //back down in order to see all objects.
36230                         po--;
36231                     }
36232                 }
36233
36234             }
36235         }
36236     }
36237 }
36238
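// Grows m_Array by roughly 20% (oldArraySize / 10 * 12), copies the old contents and rebases the
// fill pointers by the distance between the old and new allocations. Returns FALSE rather than
// throwing on out-of-memory, because the caller typically holds the finalize lock and must only
// throw after releasing it.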
36239 BOOL
36240 CFinalize::GrowArray()
36241 {
36242     size_t oldArraySize = (m_EndArray - m_Array);
36243     size_t newArraySize =  (size_t)(((float)oldArraySize / 10) * 12);
36244
36245     Object** newArray = new (nothrow) Object*[newArraySize];
36246     if (!newArray)
36247     {
36248         // It's not safe to throw here, because of the FinalizeLock.  Tell our caller
36249         // to throw for us.
36250 //        ASSERT (newArray);
36251         return FALSE;
36252     }
36253     memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));
36254
36255     //adjust the fill pointers
36256     for (int i = 0; i < FreeList; i++)
36257     {
36258         m_FillPointers [i] += (newArray - m_Array);
36259     }
36260     delete[] m_Array; // m_Array was allocated with new[], so use the array form of delete
36261     m_Array = newArray;
36262     m_EndArray = &m_Array [newArraySize];
36263
36264     return TRUE;
36265 }
36266
36267 #ifdef VERIFY_HEAP
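// Heap-verification check over the finalization array: an entry filed under generation i must
// refer to an object that is currently in generation i or older; a younger object here would
// mean the queue missed a promotion/demotion update. Each object header is validated as well.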
36268 void CFinalize::CheckFinalizerObjects()
36269 {
36270     for (int i = 0; i <= max_generation; i++)
36271     {
36272         Object **startIndex = SegQueue (gen_segment (i));
36273         Object **stopIndex  = SegQueueLimit (gen_segment (i));
36274
36275         for (Object **po = startIndex; po < stopIndex; po++)
36276         {
36277             if ((int)g_theGCHeap->WhichGeneration (*po) < i)
36278                 FATAL_GC_ERROR ();
36279             ((CObjectHeader*)*po)->Validate();
36280         }
36281     }
36282 }
36283 #endif //VERIFY_HEAP
36284
36285 #endif // FEATURE_PREMORTEM_FINALIZATION
36286
36287
36288 //------------------------------------------------------------------------------
36289 //
36290 //                      End of VM specific support
36291 //
36292 //------------------------------------------------------------------------------
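// Diagnostic walk over one heap: iterate objects linearly from the start of the requested
// generation, following the segment chain; once the small object segments are exhausted, and if
// walk_large_object_heap_p is set, continue over the large object heap with its own alignment
// constant. Free objects are skipped, and the walk stops early if the callback returns false.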
36293 void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
36294 {
36295     generation* gen = gc_heap::generation_of (gen_number);
36296     heap_segment*    seg = generation_start_segment (gen);
36297     uint8_t*       x = ((gen_number == max_generation) ? heap_segment_mem (seg) :
36298                      generation_allocation_start (gen));
36299
36300     uint8_t*       end = heap_segment_allocated (seg);
36301     BOOL small_object_segments = TRUE;
36302     int align_const = get_alignment_constant (small_object_segments);
36303
36304     while (1)
36305
36306     {
36307         if (x >= end)
36308         {
36309             if ((seg = heap_segment_next (seg)) != 0)
36310             {
36311                 x = heap_segment_mem (seg);
36312                 end = heap_segment_allocated (seg);
36313                 continue;
36314             }
36315             else
36316             {
36317                 if (small_object_segments && walk_large_object_heap_p)
36318
36319                 {
36320                     small_object_segments = FALSE;
36321                     align_const = get_alignment_constant (small_object_segments);
36322                     seg = generation_start_segment (large_object_generation);
36323                     x = heap_segment_mem (seg);
36324                     end = heap_segment_allocated (seg);
36325                     continue;
36326                 }
36327                 else
36328                 {
36329                     break;
36330                 }
36331             }
36332         }
36333
36334         size_t s = size (x);
36335         CObjectHeader* o = (CObjectHeader*)x;
36336
36337         if (!o->IsFree())
36338
36339         {
36340             _ASSERTE(((size_t)o & 0x3) == 0); // Last two bits should never be set at this point
36341
36342             if (!fn (o->GetObjectBase(), context))
36343                 return;
36344         }
36345         x = x + Align (s, align_const);
36346     }
36347 }
36348
36349 void gc_heap::walk_finalize_queue (fq_walk_fn fn)
36350 {
36351 #ifdef FEATURE_PREMORTEM_FINALIZATION
36352     finalize_queue->WalkFReachableObjects (fn);
36353 #endif //FEATURE_PREMORTEM_FINALIZATION
36354 }
36355
36356 void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
36357 {
36358 #ifdef MULTIPLE_HEAPS
36359     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36360     {
36361         gc_heap* hp = gc_heap::g_heaps [hn];
36362
36363         hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
36364     }
36365 #else
36366     walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
36367 #endif //MULTIPLE_HEAPS
36368 }
36369
36370 void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
36371 {
36372     uint8_t* o = (uint8_t*)obj;
36373     if (o)
36374     {
36375         go_through_object_cl (method_table (o), o, size(o), oo,
36376                                     {
36377                                         if (*oo)
36378                                         {
36379                                             Object *oh = (Object*)*oo;
36380                                             if (!fn (oh, context))
36381                                                 return;
36382                                         }
36383                                     }
36384             );
36385     }
36386 }
36387
36388 void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type)
36389 {
36390     gc_heap* hp = (gc_heap*)gc_context;
36391     hp->walk_survivors (fn, diag_context, type);
36392 }
36393
36394 void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p)
36395 {
36396     gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
36397 }
36398
36399 void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
36400 {
36401     gc_heap* hp = (gc_heap*)gc_context;
36402     hp->walk_finalize_queue (fn);
36403 }
36404
36405 void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
36406 {
36407 #ifdef MULTIPLE_HEAPS
36408     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36409     {
36410         gc_heap* hp = gc_heap::g_heaps [hn];
36411         hp->finalize_queue->GcScanRoots(fn, hn, sc);
36412     }
36413 #else
36414     pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
36415 #endif //MULTIPLE_HEAPS
36416 }
36417
36418 void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
36419 {
36420     UNREFERENCED_PARAMETER(gen_number);
36421     GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
36422 }
36423
36424 void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
36425 {
36426     UNREFERENCED_PARAMETER(gen_number);
36427     GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
36428 }
36429
36430 // Go through and touch (read) each page straddled by a memory block.
36431 void TouchPages(void * pStart, size_t cb)
36432 {
36433     const uint32_t pagesize = OS_PAGE_SIZE;
36434     _ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
36435     if (cb)
36436     {
36437         VOLATILE(char)* pEnd = (VOLATILE(char)*)(cb + (char*)pStart);
36438         VOLATILE(char)* p = (VOLATILE(char)*)(((char*)pStart) -  (((size_t)pStart) & (pagesize-1)));
36439         while (p < pEnd)
36440         {
36441             char a;
36442             a = VolatileLoad(p);
36443             //printf("Touching page %lxh\n", (uint32_t)p);
36444             p += pagesize;
36445         }
36446     }
36447 }
36448
36449 #if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
36450     // This code is designed to catch the failure to update the write barrier
36451     // The way it works is to copy the whole heap right after every GC.  The write
36452     // barrier code has been modified so that it updates the shadow as well as the
36453     // real GC heap.  Before doing the next GC, we walk the heap, looking for pointers
36454     // that were updated in the real heap, but not the shadow.  A mismatch indicates
36455     // an error.  The offending code can be found by breaking after the correct GC,
36456     // and then placing a data breakpoint on the Heap location that was updated without
36457     // going through the write barrier.
36458
36459     // Called at process shutdown
36460 void deleteGCShadow()
36461 {
36462     if (g_GCShadow != 0)
36463         GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow);
36464     g_GCShadow = 0;
36465     g_GCShadowEnd = 0;
36466 }
36467
36468     // Called at startup and right after a GC, get a snapshot of the GC Heap
36469 void initGCShadow()
36470 {
36471     if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK))
36472         return;
36473
36474     size_t len = g_gc_highest_address - g_gc_lowest_address;
36475     if (len > (size_t)(g_GCShadowEnd - g_GCShadow)) 
36476     {
36477         deleteGCShadow();
36478         g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None);
36479         if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len))
36480         {
36481             _ASSERTE(!"Not enough memory to run HeapVerify level 2");
36482             // If after the assert we decide to allow the program to continue 
36483             // running we need to be in a state that will not trigger any 
36484             // additional AVs while we fail to allocate a shadow segment, i.e. 
36485             // ensure that calls to updateGCShadow() and checkGCWriteBarrier() don't AV
36486             deleteGCShadow();
36487             return;
36488         }
36489
36490         g_GCShadowEnd += len;
36491     }
36492
36493     // save the value of g_gc_lowest_address at this time.  If this value changes before
36494     // the next call to checkGCWriteBarrier() it means we extended the heap (with a
36495     // large object segment most probably), and the whole shadow segment is inconsistent.
36496     g_shadow_lowest_address = g_gc_lowest_address;
36497
36498         //****** Copy the whole GC heap ******
36499     //
36500     // NOTE: This is the one situation where the combination of heap_segment_rw(gen_start_segment())
36501     // can produce a NULL result.  This is because the initialization has not completed.
36502     //
36503     generation* gen = gc_heap::generation_of (max_generation);
36504     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
36505
36506     ptrdiff_t delta = g_GCShadow - g_gc_lowest_address;
36507     BOOL small_object_segments = TRUE;
36508     while(1)
36509     {
36510         if (!seg)
36511         {
36512             if (small_object_segments)
36513             {
36514                 small_object_segments = FALSE;
36515                 seg = heap_segment_rw (generation_start_segment (gc_heap::generation_of (max_generation+1)));
36516                 continue;
36517             }
36518             else
36519                 break;
36520         }
36521             // Copy the segment
36522         uint8_t* start = heap_segment_mem(seg);
36523         uint8_t* end = heap_segment_allocated (seg);
36524         memcpy(start + delta, start, end - start);
36525         seg = heap_segment_next_rw (seg);
36526     }
36527 }
36528
36529 #define INVALIDGCVALUE (void*)((size_t)0xcccccccd)
36530
36531     // test to see if 'ptr' was only updated via the write barrier.
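    // The shadow copy lives at a fixed offset from the real heap, so the slot to compare against
    // is simply g_GCShadow + (ptr - g_gc_lowest_address). A mismatch normally means a reference
    // was stored without going through the write barrier; INVALIDGCVALUE marks slots where a
    // benign shadow-update race has already been detected (see the note about updateGCShadow
    // below).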
36532 inline void testGCShadow(Object** ptr)
36533 {
36534     Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)];
36535     if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
36536     {
36537
36538         // If you get this assertion, someone updated a GC pointer in the heap without
36539         // using the write barrier.  To find out who, check the value of
36540         // dd_collection_count (dynamic_data_of (0)) and note the value of 'ptr'.
36541         // Rerun the app up to the point where the previous GC just occurred, then put a
36542         // data breakpoint on 'ptr' and check every write to that location between the two
36543         // GCs.  The last one is the write that did not use the write barrier.
36544
36545         // If the memory of interest does not exist at system startup, you need to set the
36546         // data breakpoint right after the memory gets committed: set a breakpoint at the end
36547         // of grow_heap_segment, put the value of 'ptr' in the memory window, and run until
36548         // the memory gets mapped.  Then you can set your data breakpoint.
36550
36551         // Note a recent change, we've identified race conditions when updating the gc shadow.
36552         // Throughout the runtime, code will update an address in the gc heap, then erect the
36553         // write barrier, which calls updateGCShadow. With an app that pounds one heap location
36554         // from multiple threads, you can hit this assert even though all involved are using the
36555         // write barrier properly. Thus we detect the race and set this location to INVALIDGCVALUE.
36556         // TODO: the code in jithelp.asm doesn't call updateGCShadow, and hasn't been
36557         // TODO: fixed to detect the race. We've only seen this race from VolatileWritePtr,
36558         // TODO: so elect not to fix jithelp.asm at this time. It should be done if we start hitting
36559         // TODO: erroneous asserts in here.
36560
36561         if (*shadow != INVALIDGCVALUE)
36562         {
36563 #ifdef FEATURE_BASICFREEZE
36564             // Write barriers for stores of references to frozen objects may be optimized away.
36565             if (!gc_heap::frozen_object_p(*ptr))
36566 #endif // FEATURE_BASICFREEZE
36567             {
36568                 _ASSERTE(!"Pointer updated without using write barrier");
36569             }
36570         }
36571         /*
36572         else
36573         {
36574              printf("saw a INVALIDGCVALUE. (just to let you know)\n");
36575         }
36576         */
36577     }
36578 }
36579
36580 void testGCShadowHelper (uint8_t* x)
36581 {
36582     size_t s = size (x);
36583     if (contain_pointers (x))
36584     {
36585         go_through_object_nostart (method_table(x), x, s, oo,
36586                            { testGCShadow((Object**) oo); });
36587     }
36588 }
36589
36590     // Walk the whole heap, looking for pointers that were not updated with the write barrier.
36591 void checkGCWriteBarrier()
36592 {
36593     // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment
36594     // and the GC shadow segment did not track that change!
36595     if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address)
36596     {
36597         // No shadow heap (or it is stale), nothing to check.
36598         return;
36599     }
36600
36601     {
36602         generation* gen = gc_heap::generation_of (max_generation);
36603         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
36604
36605         PREFIX_ASSUME(seg != NULL);
36606
36607         while(seg)
36608         {
36609             uint8_t* x = heap_segment_mem(seg);
36610             while (x < heap_segment_allocated (seg))
36611             {
36612                 size_t s = size (x);
36613                 testGCShadowHelper (x);
36614                 x = x + Align (s);
36615             }
36616             seg = heap_segment_next_rw (seg);
36617         }
36618     }
36619
36620     {
36621         // go through large object heap
36622         int alignment = get_alignment_constant(FALSE);
36623         generation* gen = gc_heap::generation_of (max_generation+1);
36624         heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
36625
36626         PREFIX_ASSUME(seg != NULL);
36627
36628         while(seg)
36629         {
36630             uint8_t* x = heap_segment_mem(seg);
36631             while (x < heap_segment_allocated (seg))
36632             {
36633                 size_t s = size (x);
36634                 testGCShadowHelper (x);
36635                 x = x + Align (s, alignment);
36636             }
36637             seg = heap_segment_next_rw (seg);
36638         }
36639     }
36640 }
36641 #endif //WRITE_BARRIER_CHECK && !SERVER_GC
36642
36643 #endif // !DACCESS_COMPILE
36644
36645 #ifdef FEATURE_BASICFREEZE
36646 void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef)
36647 {
36648 #ifdef DACCESS_COMPILE
36649     UNREFERENCED_PARAMETER(seg);
36650     UNREFERENCED_PARAMETER(pvContext);
36651     UNREFERENCED_PARAMETER(pfnMethodTable);
36652     UNREFERENCED_PARAMETER(pfnObjRef);
36653 #else
36654     uint8_t *o = heap_segment_mem(seg);
36655
36656     // small heap alignment constant
36657     int alignment = get_alignment_constant(TRUE);
36658
36659     while (o < heap_segment_allocated(seg))
36660     {
36661         pfnMethodTable(pvContext, o);
36662
36663         if (contain_pointers (o))
36664         {
36665             go_through_object_nostart (method_table (o), o, size(o), oo,
36666                    {
36667                        if (*oo)
36668                            pfnObjRef(pvContext, oo);
36669                    }
36670             );
36671         }
36672
36673         o += Align(size(o), alignment);
36674     }
36675 #endif //!DACCESS_COMPILE
36676 }
36677 #endif // FEATURE_BASICFREEZE
36678
36679 #ifndef DACCESS_COMPILE
36680 HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
36681 {
36682 #ifdef BACKGROUND_GC
36683     if (recursive_gc_sync::background_running_p())
36684     {
36685         uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout);
36686         if (dwRet == WAIT_OBJECT_0)
36687             return S_OK;
36688         else if (dwRet == WAIT_TIMEOUT)
36689             return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
36690         else
36691             return E_FAIL;      // It is not clear what the last error would be if the wait failed,
36692                                 // as there are too many layers in between. The best we can do is to return E_FAIL.
36693     }
36694 #endif
36695
36696     return S_OK;
36697 }
36698 #endif // !DACCESS_COMPILE
36699
36700 void GCHeap::TemporaryEnableConcurrentGC()
36701 {
36702 #ifdef BACKGROUND_GC
36703     gc_heap::temp_disable_concurrent_p = false;
36704 #endif //BACKGROUND_GC
36705 }
36706
36707 void GCHeap::TemporaryDisableConcurrentGC()
36708 {
36709 #ifdef BACKGROUND_GC
36710     gc_heap::temp_disable_concurrent_p = true;
36711 #endif //BACKGROUND_GC
36712 }
36713
36714 bool GCHeap::IsConcurrentGCEnabled()
36715 {
36716 #ifdef BACKGROUND_GC
36717     return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
36718 #else
36719     return FALSE;
36720 #endif //BACKGROUND_GC
36721 }
36722
36723 void GCHeap::SetFinalizeRunOnShutdown(bool value)
36724 {
36725     g_fFinalizerRunOnShutDown = value;
36726 }
36727
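// Publishes the addresses of the GC globals that the DAC (out-of-process debugging support)
// needs in order to inspect a live process or a dump. The major/minor version numbers are meant
// to let the DAC detect layout changes; which fields get filled in depends on whether this is a
// workstation (single heap) or server (MULTIPLE_HEAPS) build.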
36728 void PopulateDacVars(GcDacVars *gcDacVars)
36729 {
36730 #ifndef DACCESS_COMPILE
36731     assert(gcDacVars != nullptr);
36732     *gcDacVars = {};
36733     gcDacVars->major_version_number = 1;
36734     gcDacVars->minor_version_number = 0;
36735     gcDacVars->built_with_svr = &g_built_with_svr_gc;
36736     gcDacVars->build_variant = &g_build_variant;
36737     gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt);
36738     gcDacVars->generation_size = sizeof(generation);
36739     gcDacVars->max_gen = &g_max_generation;
36740 #ifndef MULTIPLE_HEAPS
36741     gcDacVars->mark_array = &gc_heap::mark_array;
36742     gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment);
36743     gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state);
36744     gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg);
36745     gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start;
36746     gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address;
36747     gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address;
36748     gcDacVars->alloc_allocated = &gc_heap::alloc_allocated;
36749     gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj;
36750     gcDacVars->oom_info = &gc_heap::oom_info;
36751     gcDacVars->finalize_queue = reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue);
36752     gcDacVars->generation_table = reinterpret_cast<dac_generation**>(&gc_heap::generation_table);
36753 #ifdef GC_CONFIG_DRIVEN
36754     gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms);
36755     gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap);
36756     gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap);
36757     gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap);
36758     gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap);
36759 #endif // GC_CONFIG_DRIVEN
36760 #ifdef HEAP_ANALYZE
36761     gcDacVars->internal_root_array = &gc_heap::internal_root_array;
36762     gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index;
36763     gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success;
36764 #endif // HEAP_ANALYZE
36765 #else
36766     gcDacVars->n_heaps = &gc_heap::n_heaps;
36767     gcDacVars->g_heaps = reinterpret_cast<dac_gc_heap***>(&gc_heap::g_heaps);
36768 #endif // MULTIPLE_HEAPS
36769 #endif // DACCESS_COMPILE
36770 }