1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
9 // GC automatically manages memory allocated by managed code.
10 // The design doc for GC can be found at Documentation/botr/garbage-collection.md
12 // This file includes both the code for GC and the allocator. The most common
13 // case for a GC to be triggered is from the allocator code. See
14 // code:#try_allocate_more_space where it calls GarbageCollectGeneration.
16 // Entry points for the allocator are GCHeap::Alloc* which are called by the
17 // allocation helpers in gcscan.cpp
24 // We just needed a simple random number generator for testing.
30 static uint64_t get_rand()
32 x = (314159269*x+278281) & 0x7FFFFFFF;
36 // obtain random number in the range 0 .. r-1
37 static uint64_t get_rand(uint64_t r) {
39 uint64_t x = (uint64_t)((get_rand() * r) >> 31);
44 uint64_t gc_rand::x = 0;
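
// Illustration (not part of the GC): get_rand() above is a 31-bit linear
// congruential generator, and get_rand(r) maps its output into [0, r) by
// taking the top bits of the 31-bit x 31-bit product instead of a modulus.
// A minimal standalone sketch, assuming r < 2^31:
//
//   uint64_t state = 0;
//   uint64_t next31 () { return state = (314159269*state + 278281) & 0x7FFFFFFF; }
//   uint64_t next_below (uint64_t r) { return (next31 () * r) >> 31; } // in [0, r)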
46 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
47 BOOL bgc_heap_walk_for_etw_p = FALSE;
48 #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
50 #if defined(FEATURE_REDHAWK)
51 #define MAYBE_UNUSED_VAR(v) v = v
53 #define MAYBE_UNUSED_VAR(v)
54 #endif // FEATURE_REDHAWK
56 #define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
57 #define commit_min_th (16*OS_PAGE_SIZE)
60 #define partial_size_th 100
61 #define num_partial_refs 64
63 #define partial_size_th 100
64 #define num_partial_refs 32
67 #define demotion_plug_len_th (6*1024*1024)
70 #define MARK_STACK_INITIAL_LENGTH 1024
72 #define MARK_STACK_INITIAL_LENGTH 128
75 #define LOH_PIN_QUEUE_LENGTH 100
76 #define LOH_PIN_DECAY 10
78 uint32_t yp_spin_count_unit = 0;
79 size_t loh_size_threshold = LARGE_OBJECT_SIZE;
81 #ifdef GC_CONFIG_DRIVEN
82 int compact_ratio = 0;
83 #endif //GC_CONFIG_DRIVEN
85 // See comments in reset_memory.
86 BOOL reset_mm_p = TRUE;
88 bool g_fFinalizerRunOnShutDown = false;
91 bool g_built_with_svr_gc = true;
93 bool g_built_with_svr_gc = false;
94 #endif // FEATURE_SVR_GC
96 #if defined(BUILDENV_DEBUG)
97 uint8_t g_build_variant = 0;
98 #elif defined(BUILDENV_CHECKED)
99 uint8_t g_build_variant = 1;
101 uint8_t g_build_variant = 2;
102 #endif // defined(BUILDENV_DEBUG)
104 VOLATILE(int32_t) g_no_gc_lock = -1;
106 #if defined (TRACE_GC) && !defined (DACCESS_COMPILE)
107 const char * const allocation_state_str[] = {
116 "try_free_full_seg_in_bgc",
117 "try_free_after_bgc",
120 "acquire_seg_after_cg",
121 "acquire_seg_after_bgc",
122 "check_and_wait_for_bgc",
123 "trigger_full_compact_gc",
124 "trigger_ephemeral_gc",
125 "trigger_2nd_ephemeral_gc",
129 const char * const msl_take_state_str[] = {
145 #endif //TRACE_GC && !DACCESS_COMPILE
148 // Keep this in sync with the definition of gc_reason
149 #if (defined(DT_LOG) || defined(TRACE_GC)) && !defined (DACCESS_COMPILE)
150 static const char* const str_gc_reasons[] =
162 "induced_compacting",
165 "lowmemory_host_blocking"
168 static const char* const str_gc_pause_modes[] =
173 "sustained_low_latency",
176 #endif // defined(DT_LOG) || defined(TRACE_GC)
179 BOOL is_induced (gc_reason reason)
181 return ((reason == reason_induced) ||
182 (reason == reason_induced_noforce) ||
183 (reason == reason_lowmemory) ||
184 (reason == reason_lowmemory_blocking) ||
185 (reason == reason_induced_compacting) ||
186 (reason == reason_lowmemory_host) ||
187 (reason == reason_lowmemory_host_blocking));
191 BOOL is_induced_blocking (gc_reason reason)
193 return ((reason == reason_induced) ||
194 (reason == reason_lowmemory_blocking) ||
195 (reason == reason_induced_compacting) ||
196 (reason == reason_lowmemory_host_blocking));
199 #ifndef DACCESS_COMPILE
202 size_t GetHighPrecisionTimeStamp()
204 int64_t ts = GCToOSInterface::QueryPerformanceCounter();
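    // qpf here is the QueryPerformanceCounter frequency in ticks per second
    // (initialized elsewhere in this file), so qpf / 1000 is ticks per
    // millisecond and the value returned below is a timestamp in milliseconds.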
206 return (size_t)(ts / (qpf / 1000));
211 // There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
212 // interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
213 // reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
214 // comparison with the global min/max.
215 GCStatistics g_GCStatistics;
216 GCStatistics g_LastGCStatistics;
218 char* GCStatistics::logFileName = NULL;
219 FILE* GCStatistics::logFile = NULL;
221 void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
224 if (settings.concurrent)
226 bgc.Accumulate((uint32_t)timeInMSec*1000);
229 else if (settings.background_p)
231 fgc.Accumulate((uint32_t)timeInMSec*1000);
233 if (settings.compaction)
235 assert(settings.condemned_generation < max_generation);
236 cntFGCGen[settings.condemned_generation]++;
239 #endif // BACKGROUND_GC
241 ngc.Accumulate((uint32_t)timeInMSec*1000);
243 if (settings.compaction)
245 cntNGCGen[settings.condemned_generation]++;
248 if (is_induced (settings.reason))
249 cntReasons[(int)reason_induced]++;
250 else if (settings.stress_induced)
251 cntReasons[(int)reason_gcstress]++;
253 cntReasons[(int)settings.reason]++;
256 if (settings.concurrent || !settings.background_p)
258 #endif // BACKGROUND_GC
262 #endif // BACKGROUND_GC
265 void GCStatistics::Initialize()
267 LIMITED_METHOD_CONTRACT;
268 // for efficiency's sake we're taking a dependency on the layout of a C++ object
269 // with a vtable. Protect against violations of our premise:
270 static_assert(offsetof(GCStatistics, cntDisplay) == sizeof(void*),
271 "The first field of GCStatistics follows the pointer sized vtable");
273 int podOffs = offsetof(GCStatistics, cntDisplay); // offset of the first POD field
274 memset((uint8_t*)(&g_GCStatistics)+podOffs, 0, sizeof(g_GCStatistics)-podOffs);
275 memset((uint8_t*)(&g_LastGCStatistics)+podOffs, 0, sizeof(g_LastGCStatistics)-podOffs);
278 void GCStatistics::DisplayAndUpdate()
280 LIMITED_METHOD_CONTRACT;
282 if (logFileName == NULL || logFile == NULL)
287 fprintf(logFile, "\nGCMix **** Initialize *****\n\n");
289 fprintf(logFile, "GCMix **** Summary ***** %d\n", cntDisplay);
291 // NGC summary (total, timing info)
292 ngc.DisplayAndUpdate(logFile, "NGC ", &g_LastGCStatistics.ngc, cntNGC, g_LastGCStatistics.cntNGC, msec);
294 // FGC summary (total, timing info)
295 fgc.DisplayAndUpdate(logFile, "FGC ", &g_LastGCStatistics.fgc, cntFGC, g_LastGCStatistics.cntFGC, msec);
298 bgc.DisplayAndUpdate(logFile, "BGC ", &g_LastGCStatistics.bgc, cntBGC, g_LastGCStatistics.cntBGC, msec);
300 // NGC/FGC break out by generation & compacting vs. sweeping
301 fprintf(logFile, "NGC ");
302 for (int i = max_generation; i >= 0; --i)
303 fprintf(logFile, "gen%d %d (%d). ", i, cntNGCGen[i]-g_LastGCStatistics.cntNGCGen[i], cntNGCGen[i]);
304 fprintf(logFile, "\n");
306 fprintf(logFile, "FGC ");
307 for (int i = max_generation-1; i >= 0; --i)
308 fprintf(logFile, "gen%d %d (%d). ", i, cntFGCGen[i]-g_LastGCStatistics.cntFGCGen[i], cntFGCGen[i]);
309 fprintf(logFile, "\n");
311 // Compacting vs. Sweeping break out
312 int _cntSweep = cntNGC-cntCompactNGC;
313 int _cntLastSweep = g_LastGCStatistics.cntNGC-g_LastGCStatistics.cntCompactNGC;
314 fprintf(logFile, "NGC Sweeping %d (%d) Compacting %d (%d)\n",
315 _cntSweep - _cntLastSweep, _cntSweep,
316 cntCompactNGC - g_LastGCStatistics.cntCompactNGC, cntCompactNGC);
318 _cntSweep = cntFGC-cntCompactFGC;
319 _cntLastSweep = g_LastGCStatistics.cntFGC-g_LastGCStatistics.cntCompactFGC;
320 fprintf(logFile, "FGC Sweeping %d (%d) Compacting %d (%d)\n",
321 _cntSweep - _cntLastSweep, _cntSweep,
322 cntCompactFGC - g_LastGCStatistics.cntCompactFGC, cntCompactFGC);
326 for (int reason=(int)reason_alloc_soh; reason <= (int)reason_gcstress; ++reason)
328 if (cntReasons[reason] != 0)
329 fprintf(logFile, "%s %d (%d). ", str_gc_reasons[reason],
330 cntReasons[reason]-g_LastGCStatistics.cntReasons[reason], cntReasons[reason]);
333 fprintf(logFile, "\n\n");
335 // flush the log file...
339 g_LastGCStatistics = *this;
349 size_t round_up_power2 (size_t size)
351 // Get the 0-based index of the most-significant bit in size-1.
352 // If the call failed (because size-1 is zero), size must be 1,
353 // so return 1 (because 1 rounds up to itself).
354 DWORD highest_set_bit_index;
361 &highest_set_bit_index, size - 1)) { return 1; }
363 // The size == 0 case (which would have overflowed to SIZE_MAX when decremented)
364 // is handled below by relying on the fact that highest_set_bit_index is the maximum value
365 // (31 or 63, depending on sizeof(size_t)) and left-shifting a value >= 2 by that
366 // number of bits shifts in zeros from the right, resulting in an output of zero.
367 return static_cast<size_t>(2) << highest_set_bit_index;
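
// Worked example: round_up_power2 (5) scans size-1 == 4, gets bit index 2,
// and returns 2 << 2 == 8; round_up_power2 (8) scans 7, also gets index 2,
// and returns 8 - powers of two round up to themselves.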
371 size_t round_down_power2 (size_t size)
373 // Get the 0-based index of the most-significant bit in size.
374 // If the call failed, size must be zero so return zero.
375 DWORD highest_set_bit_index;
382 &highest_set_bit_index, size)) { return 0; }
384 // Left-shift 1 by highest_set_bit_index to get back a value containing only
385 // the most-significant set bit of size, i.e. size rounded down
386 // to the next power-of-two value.
387 return static_cast<size_t>(1) << highest_set_bit_index;
390 // Get the 0-based index of the most-significant bit in the value.
391 // Returns -1 if the input value is zero (i.e. has no set bits).
393 int index_of_highest_set_bit (size_t value)
395 // Get the 0-based index of the most-significant bit in the value.
396 // If the call failed (because value is zero), return -1.
397 DWORD highest_set_bit_index;
404 &highest_set_bit_index, value)) ? -1 : static_cast<int>(highest_set_bit_index);
408 int relative_index_power2_plug (size_t power2)
410 int index = index_of_highest_set_bit (power2);
411 assert (index <= MAX_INDEX_POWER2);
413 return ((index < MIN_INDEX_POWER2) ? 0 : (index - MIN_INDEX_POWER2));
417 int relative_index_power2_free_space (size_t power2)
419 int index = index_of_highest_set_bit (power2);
420 assert (index <= MAX_INDEX_POWER2);
422 return ((index < MIN_INDEX_POWER2) ? -1 : (index - MIN_INDEX_POWER2));
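
// Illustration of the bucketing above, assuming MIN_INDEX_POWER2 == 6 (the
// real value is platform-dependent): a size that rounds to 4KB (2^12) has
// highest set bit 12 and lands in bucket 12 - 6 == 6; anything under 2^6
// bytes collapses into bucket 0 for plugs, while free spaces that small get
// -1 (no bucket).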
426 uint32_t bgc_alloc_spin_count = 140;
427 uint32_t bgc_alloc_spin_count_loh = 16;
428 uint32_t bgc_alloc_spin = 2;
432 void c_write (uint32_t& place, uint32_t value)
434 Interlocked::Exchange (&place, value);
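    // Note: an interlocked exchange is used above instead of a plain store so
    // that the write also acts as a full memory barrier with respect to
    // surrounding accesses; the value itself is simply stored.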
438 #ifndef DACCESS_COMPILE
439 // If every heap's gen2 or gen3 size is less than this threshold we will do a blocking GC.
440 const size_t bgc_min_per_heap = 4*1024*1024;
442 int gc_heap::gchist_index = 0;
443 gc_mechanisms_store gc_heap::gchist[max_history_count];
445 #ifndef MULTIPLE_HEAPS
446 size_t gc_heap::total_promoted_bytes = 0;
447 VOLATILE(bgc_state) gc_heap::current_bgc_state = bgc_not_in_process;
448 int gc_heap::gchist_index_per_heap = 0;
449 gc_heap::gc_history gc_heap::gchist_per_heap[max_history_count];
450 #endif //MULTIPLE_HEAPS
452 void gc_heap::add_to_history_per_heap()
455 gc_history* current_hist = &gchist_per_heap[gchist_index_per_heap];
456 current_hist->gc_index = settings.gc_index;
457 current_hist->current_bgc_state = current_bgc_state;
458 size_t elapsed = dd_gc_elapsed_time (dynamic_data_of (0));
459 current_hist->gc_time_ms = (uint32_t)elapsed;
460 current_hist->gc_efficiency = (elapsed ? (total_promoted_bytes / elapsed) : total_promoted_bytes);
461 current_hist->eph_low = generation_allocation_start (generation_of (max_generation-1));
462 current_hist->gen0_start = generation_allocation_start (generation_of (0));
463 current_hist->eph_high = heap_segment_allocated (ephemeral_heap_segment);
465 current_hist->bgc_lowest = background_saved_lowest_address;
466 current_hist->bgc_highest = background_saved_highest_address;
467 #endif //BACKGROUND_GC
468 current_hist->fgc_lowest = lowest_address;
469 current_hist->fgc_highest = highest_address;
470 current_hist->g_lowest = g_gc_lowest_address;
471 current_hist->g_highest = g_gc_highest_address;
473 gchist_index_per_heap++;
474 if (gchist_index_per_heap == max_history_count)
476 gchist_index_per_heap = 0;
481 void gc_heap::add_to_history()
484 gc_mechanisms_store* current_settings = &gchist[gchist_index];
485 current_settings->store (&settings);
488 if (gchist_index == max_history_count)
495 #endif //DACCESS_COMPILE
496 #endif //BACKGROUND_GC
498 #if defined(TRACE_GC) && !defined(DACCESS_COMPILE)
499 BOOL gc_log_on = TRUE;
501 size_t gc_log_file_size = 0;
503 size_t gc_buffer_index = 0;
504 size_t max_gc_buffers = 0;
506 static CLRCriticalSection gc_log_lock;
508 // we keep this much in a buffer and only flush when the buffer is full
509 #define gc_log_buffer_size (1024*1024)
510 uint8_t* gc_log_buffer = 0;
511 size_t gc_log_buffer_offset = 0;
513 void log_va_msg(const char *fmt, va_list args)
517 const int BUFFERSIZE = 512;
518 static char rgchBuffer[BUFFERSIZE];
519 char * pBuffer = &rgchBuffer[0];
522 int buffer_start = 1;
523 int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging());
524 buffer_start += pid_len;
525 memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
526 int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args);
529 msg_len = BUFFERSIZE - buffer_start;
532 msg_len += buffer_start;
534 if ((gc_log_buffer_offset + msg_len) > (gc_log_buffer_size - 12))
537 memset (index_str, '-', 8);
538 sprintf_s (index_str, _countof(index_str), "%d", (int)gc_buffer_index);
539 gc_log_buffer[gc_log_buffer_offset] = '\n';
540 memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8);
543 if (gc_buffer_index > max_gc_buffers)
545 fseek (gc_log, 0, SEEK_SET);
548 fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log);
550 memset (gc_log_buffer, '*', gc_log_buffer_size);
551 gc_log_buffer_offset = 0;
554 memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len);
555 gc_log_buffer_offset += msg_len;
560 void GCLog (const char *fmt, ... )
562 if (gc_log_on && (gc_log != NULL))
566 log_va_msg (fmt, args);
570 #endif // TRACE_GC && !DACCESS_COMPILE
572 #if defined(GC_CONFIG_DRIVEN) && !defined(DACCESS_COMPILE)
574 BOOL gc_config_log_on = FALSE;
575 FILE* gc_config_log = NULL;
577 // we keep this much in a buffer and only flush when the buffer is full
578 #define gc_config_log_buffer_size (1*1024) // TEMP
579 uint8_t* gc_config_log_buffer = 0;
580 size_t gc_config_log_buffer_offset = 0;
582 // For config, since we log so little, we keep the whole history. Also, it's only
583 // ever logged by one thread, so there is no need to synchronize.
584 void log_va_msg_config(const char *fmt, va_list args)
586 const int BUFFERSIZE = 256;
587 static char rgchBuffer[BUFFERSIZE];
588 char * pBuffer = &rgchBuffer[0];
591 int buffer_start = 1;
592 int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
593 assert (msg_len != -1);
594 msg_len += buffer_start;
596 if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
598 fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log);
599 fflush(gc_config_log);
600 gc_config_log_buffer_offset = 0;
603 memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len);
604 gc_config_log_buffer_offset += msg_len;
607 void GCLogConfig (const char *fmt, ... )
609 if (gc_config_log_on && (gc_config_log != NULL))
612 va_start( args, fmt );
613 log_va_msg_config (fmt, args);
616 #endif // GC_CONFIG_DRIVEN && !DACCESS_COMPILE
618 #ifdef SYNCHRONIZATION_STATS
620 // Number of GCs we have done since we last logged.
621 static unsigned int gc_count_during_log;
622 // In ms. This is how often we print out stats.
623 static const unsigned int log_interval = 5000;
624 // Time (in ms) when we start a new log interval.
625 static unsigned int log_start_tick;
626 static unsigned int gc_lock_contended;
627 static int64_t log_start_hires;
628 // Cycles accumulated in SuspendEE during log_interval.
629 static uint64_t suspend_ee_during_log;
630 // Cycles accumulated in RestartEE during log_interval.
631 static uint64_t restart_ee_during_log;
632 static uint64_t gc_during_log;
634 #endif //SYNCHRONIZATION_STATS
637 init_sync_log_stats()
639 #ifdef SYNCHRONIZATION_STATS
640 if (gc_count_during_log == 0)
642 gc_heap::init_sync_stats();
643 suspend_ee_during_log = 0;
644 restart_ee_during_log = 0;
646 gc_lock_contended = 0;
648 log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
649 log_start_hires = GCToOSInterface::QueryPerformanceCounter();
651 gc_count_during_log++;
652 #endif //SYNCHRONIZATION_STATS
656 process_sync_log_stats()
658 #ifdef SYNCHRONIZATION_STATS
660 unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick;
662 if (log_elapsed > log_interval)
664 uint64_t total = GCToOSInterface::QueryPerformanceCounter() - log_start_hires;
665 // Print out the cycles we spent on average in each suspend and restart.
666 printf("\n_________________________________________________________________________________\n"
667 "Past %d(s): #%3d GCs; Total gc_lock contended: %8u; GC: %12u\n"
668 "SuspendEE: %8u; RestartEE: %8u GC %.3f%%\n",
672 (unsigned int)(gc_during_log / gc_count_during_log),
673 (unsigned int)(suspend_ee_during_log / gc_count_during_log),
674 (unsigned int)(restart_ee_during_log / gc_count_during_log),
675 (double)(100.0f * gc_during_log / total));
676 gc_heap::print_sync_stats(gc_count_during_log);
678 gc_count_during_log = 0;
680 #endif //SYNCHRONIZATION_STATS
683 #ifdef MULTIPLE_HEAPS
687 gc_join_init_cpu_mapping = 0,
689 gc_join_generation_determined = 2,
690 gc_join_begin_mark_phase = 3,
691 gc_join_scan_dependent_handles = 4,
692 gc_join_rescan_dependent_handles = 5,
693 gc_join_scan_sizedref_done = 6,
694 gc_join_null_dead_short_weak = 7,
695 gc_join_scan_finalization = 8,
696 gc_join_null_dead_long_weak = 9,
697 gc_join_null_dead_syncblk = 10,
698 gc_join_decide_on_compaction = 11,
699 gc_join_rearrange_segs_compaction = 12,
700 gc_join_adjust_handle_age_compact = 13,
701 gc_join_adjust_handle_age_sweep = 14,
702 gc_join_begin_relocate_phase = 15,
703 gc_join_relocate_phase_done = 16,
704 gc_join_verify_objects_done = 17,
705 gc_join_start_bgc = 18,
706 gc_join_restart_ee = 19,
707 gc_join_concurrent_overflow = 20,
708 gc_join_suspend_ee = 21,
709 gc_join_bgc_after_ephemeral = 22,
710 gc_join_allow_fgc = 23,
711 gc_join_bgc_sweep = 24,
712 gc_join_suspend_ee_verify = 25,
713 gc_join_restart_ee_verify = 26,
714 gc_join_set_state_free = 27,
715 gc_r_join_update_card_bundle = 28,
716 gc_join_after_absorb = 29,
717 gc_join_verify_copy_table = 30,
718 gc_join_after_reset = 31,
719 gc_join_after_ephemeral_sweep = 32,
720 gc_join_after_profiler_heap_walk = 33,
721 gc_join_minimal_gc = 34,
722 gc_join_after_commit_soh_no_gc = 35,
723 gc_join_expand_loh_no_gc = 36,
724 gc_join_final_no_gc = 37,
725 gc_join_disable_software_write_watch = 38,
731 join_flavor_server_gc = 0,
735 #define first_thread_arrived 2
736 #pragma warning(push)
737 #pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads
738 struct DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) join_structure
740 // Shared, non-volatile; kept on a separate line to prevent eviction
743 // Keep polling/wait structures on a separate line; written once per join
744 DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
745 GCEvent joined_event[3]; // the last event in the array is only used for first_thread_arrived.
746 Volatile<int> lock_color;
747 VOLATILE(BOOL) wait_done;
748 VOLATILE(BOOL) joined_p;
750 // Keep volatile counted locks on a separate cache line; written many times per join
751 DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
752 VOLATILE(int32_t) join_lock;
753 VOLATILE(int32_t) r_join_lock;
763 type_first_r_join = 3,
775 join_heap_restart = 100,
776 join_heap_r_restart = 200
788 join_structure join_struct;
791 gc_join_flavor flavor;
794 uint64_t start[MAX_SUPPORTED_CPUS], end[MAX_SUPPORTED_CPUS], start_seq;
795 // remember join id and last thread to arrive so restart can use these
797 // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval
799 // counters for joins, in 1000's of clock cycles
800 uint64_t elapsed_total[gc_join_max], wake_total[gc_join_max], seq_loss_total[gc_join_max], par_loss_total[gc_join_max], in_join_total[gc_join_max];
804 BOOL init (int n_th, gc_join_flavor f)
806 dprintf (JOIN_LOG, ("Initializing join structure"));
807 join_struct.n_threads = n_th;
808 join_struct.lock_color = 0;
809 for (int i = 0; i < 3; i++)
811 if (!join_struct.joined_event[i].IsValid())
813 join_struct.joined_p = FALSE;
814 dprintf (JOIN_LOG, ("Creating join event %d", i));
815 // TODO - changing this to a non OS event
816 // because this is also used by BGC threads which are
817 // managed threads and WaitEx does not allow you to wait
818 // for an OS event on a managed thread.
819 // But we are not sure if this plays well in the hosting
821 //join_struct.joined_event[i].CreateOSManualEventNoThrow(FALSE);
822 if (!join_struct.joined_event[i].CreateManualEventNoThrow(FALSE))
826 join_struct.join_lock = join_struct.n_threads;
827 join_struct.r_join_lock = join_struct.n_threads;
828 join_struct.wait_done = FALSE;
832 start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
840 dprintf (JOIN_LOG, ("Destroying join structure"));
841 for (int i = 0; i < 3; i++)
843 if (join_struct.joined_event[i].IsValid())
844 join_struct.joined_event[i].CloseEvent();
848 inline void fire_event (int heap, join_time time, join_type type, int join_id)
850 FIRE_EVENT(GCJoin_V2, heap, time, type, join_id);
853 void join (gc_heap* gch, int join_id)
856 // parallel execution ends here
857 end[gch->heap_number] = get_ts();
860 assert (!join_struct.joined_p);
861 int color = join_struct.lock_color.LoadWithoutBarrier();
863 if (Interlocked::Decrement(&join_struct.join_lock) != 0)
865 dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d",
866 flavor, join_id, (int32_t)(join_struct.join_lock)));
868 fire_event (gch->heap_number, time_start, type_join, join_id);
870 //busy wait around the color
871 if (color == join_struct.lock_color.LoadWithoutBarrier())
874 int spin_count = 128 * yp_spin_count_unit;
875 for (int j = 0; j < spin_count; j++)
877 if (color != join_struct.lock_color.LoadWithoutBarrier())
881 YieldProcessor(); // indicate to the processor that we are spinning
884 // we've spun, and if color still hasn't changed, fall into hard wait
885 if (color == join_struct.lock_color.LoadWithoutBarrier())
887 dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d",
888 flavor, join_id, color, (int32_t)(join_struct.join_lock)));
890 //Thread* current_thread = GCToEEInterface::GetThread();
891 //BOOL cooperative_mode = gc_heap::enable_preemptive ();
892 uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE);
893 //gc_heap::disable_preemptive (cooperative_mode);
895 if (dwJoinWait != WAIT_OBJECT_0)
897 STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
902 // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
903 if (color == join_struct.lock_color.LoadWithoutBarrier())
908 dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d",
909 flavor, join_id, (int32_t)(join_struct.join_lock)));
912 fire_event (gch->heap_number, time_end, type_join, join_id);
915 // parallel execution starts here
916 start[gch->heap_number] = get_ts();
917 Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number]));
922 fire_event (gch->heap_number, time_start, type_last_join, join_id);
924 join_struct.joined_p = TRUE;
925 dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id));
926 join_struct.joined_event[!color].Reset();
928 // this one is alone so it can proceed
930 // remember the join id, the last thread arriving, the start of the sequential phase,
931 // and keep track of the cycles spent waiting in the join
932 thd = gch->heap_number;
933 start_seq = get_ts();
934 Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number]));
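
// A minimal sketch of the two-color handoff implemented by join/restart above
// (for illustration; the real code also spins, fires events, and keeps stats):
//
//   int color = join_struct.lock_color.LoadWithoutBarrier();
//   if (Interlocked::Decrement (&join_struct.join_lock) != 0)
//       join_struct.joined_event[color].Wait (INFINITE, FALSE); // not last: wait
//   else
//   {
//       join_struct.joined_event[!color].Reset();  // pre-arm the other color
//       /* single-threaded work, then restart(): */
//       join_struct.join_lock = join_struct.n_threads;
//       join_struct.lock_color = !color;           // flip before releasing
//       join_struct.joined_event[color].Set();     // release the waiters
//   }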
939 // Reverse join - the first thread to get here does the work; other threads will
940 // only proceed after the work is done.
941 // Note that you cannot call this twice in a row on the same thread. There's also
942 // no need to call it twice in a row - you should just merge the work.
943 BOOL r_join (gc_heap* gch, int join_id)
946 if (join_struct.n_threads == 1)
951 if (Interlocked::CompareExchange(&join_struct.r_join_lock, 0, join_struct.n_threads) == 0)
953 if (!join_struct.wait_done)
955 dprintf (JOIN_LOG, ("r_join() Waiting..."));
957 fire_event (gch->heap_number, time_start, type_join, join_id);
959 //busy wait around wait_done
960 if (!join_struct.wait_done)
963 int spin_count = 256 * yp_spin_count_unit;
964 for (int j = 0; j < spin_count; j++)
966 if (join_struct.wait_done)
970 YieldProcessor(); // indicate to the processor that we are spinning
973 // we've spun, and if wait_done still hasn't been set, fall into hard wait
974 if (!join_struct.wait_done)
976 dprintf (JOIN_LOG, ("Join() hard wait on reset event %d", first_thread_arrived));
977 uint32_t dwJoinWait = join_struct.joined_event[first_thread_arrived].Wait(INFINITE, FALSE);
978 if (dwJoinWait != WAIT_OBJECT_0)
980 STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
985 // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
986 if (!join_struct.wait_done)
991 dprintf (JOIN_LOG, ("r_join() done"));
994 fire_event (gch->heap_number, time_end, type_join, join_id);
1001 fire_event (gch->heap_number, time_start, type_first_r_join, join_id);
1009 return GCToOSInterface::QueryPerformanceCounter();
1012 void start_ts (gc_heap* gch)
1014 // parallel execution starts here
1015 start[gch->heap_number] = get_ts();
1022 uint64_t elapsed_seq = get_ts() - start_seq;
1023 uint64_t max = 0, sum = 0, wake = 0;
1024 uint64_t min_ts = start[0];
1025 for (int i = 1; i < join_struct.n_threads; i++)
1027 if(min_ts > start[i]) min_ts = start[i];
1030 for (int i = 0; i < join_struct.n_threads; i++)
1032 uint64_t wake_delay = start[i] - min_ts;
1033 uint64_t elapsed = end[i] - start[i];
1039 uint64_t seq_loss = (join_struct.n_threads - 1)*elapsed_seq;
1040 uint64_t par_loss = join_struct.n_threads*max - sum;
1041 double efficiency = 0.0;
1043 efficiency = sum*100.0/(join_struct.n_threads*max);
1045 const double ts_scale = 1e-6;
1047 // enable this printf to get statistics on each individual join as it occurs
1048 // printf("join #%3d seq_loss = %5g par_loss = %5g efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);
1050 elapsed_total[id] += sum;
1051 wake_total[id] += wake;
1052 seq_loss_total[id] += seq_loss;
1053 par_loss_total[id] += par_loss;
1055 // every 10 seconds, print a summary of the time spent in each type of join
1056 if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000)
1058 printf("**** summary *****\n");
1059 for (int i = 0; i < 16; i++)
1061 printf("join #%3d elapsed_total = %8g wake_loss = %8g seq_loss = %8g par_loss = %8g in_join_total = %8g\n",
1063 ts_scale*elapsed_total[i],
1064 ts_scale*wake_total[i],
1065 ts_scale*seq_loss_total[i],
1066 ts_scale*par_loss_total[i],
1067 ts_scale*in_join_total[i]);
1068 elapsed_total[i] = wake_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0;
1070 start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
1074 fire_event (join_heap_restart, time_start, type_restart, -1);
1075 assert (join_struct.joined_p);
1076 join_struct.joined_p = FALSE;
1077 join_struct.join_lock = join_struct.n_threads;
1078 dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
1079 // printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
1080 int color = join_struct.lock_color.LoadWithoutBarrier();
1081 join_struct.lock_color = !color;
1082 join_struct.joined_event[color].Set();
1084 // printf("Set joined_event %d\n", !join_struct.lock_color);
1086 fire_event (join_heap_restart, time_end, type_restart, -1);
1089 start[thd] = get_ts();
1095 dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
1096 return join_struct.joined_p;
1101 if (join_struct.n_threads != 1)
1103 fire_event (join_heap_r_restart, time_start, type_restart, -1);
1104 join_struct.wait_done = TRUE;
1105 join_struct.joined_event[first_thread_arrived].Set();
1106 fire_event (join_heap_r_restart, time_end, type_restart, -1);
1112 if (join_struct.n_threads != 1)
1114 join_struct.r_join_lock = join_struct.n_threads;
1115 join_struct.wait_done = FALSE;
1116 join_struct.joined_event[first_thread_arrived].Reset();
1123 #ifdef BACKGROUND_GC
1125 #endif //BACKGROUND_GC
1127 #endif //MULTIPLE_HEAPS
1129 #define spin_and_switch(count_to_spin, expr) \
1131 for (int j = 0; j < count_to_spin; j++) \
1141 GCToOSInterface::YieldThread(0); \
1145 #ifndef DACCESS_COMPILE
1146 #ifdef BACKGROUND_GC
1148 #define max_pending_allocs 64
1150 class exclusive_sync
1152 // TODO - verify that this is the right syntax for Volatile.
1153 VOLATILE(uint8_t*) rwp_object;
1154 VOLATILE(int32_t) needs_checking;
1158 uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)];
1160 // TODO - perhaps each object should be on its own cache line...
1161 VOLATILE(uint8_t*) alloc_objects[max_pending_allocs];
1163 int find_free_index ()
1165 for (int i = 0; i < max_pending_allocs; i++)
1167 if (alloc_objects [i] == (uint8_t*)0)
1179 spin_count = 32 * (g_num_processors - 1);
1182 for (int i = 0; i < max_pending_allocs; i++)
1184 alloc_objects [i] = (uint8_t*)0;
1190 for (int i = 0; i < max_pending_allocs; i++)
1192 if (alloc_objects [i] != (uint8_t*)0)
1194 GCToOSInterface::DebugBreak();
1199 void bgc_mark_set (uint8_t* obj)
1201 dprintf (3, ("cm: probing %Ix", obj));
1203 if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
1205 // If we spend too much time scanning all the allocs,
1206 // consider adding a high water mark and scan up
1207 // to that; we'll need to interlock in done when
1208 // we update the high watermark.
1209 for (int i = 0; i < max_pending_allocs; i++)
1211 if (obj == alloc_objects[i])
1214 dprintf (3, ("cm: will spin"));
1215 spin_and_switch (spin_count, (obj != alloc_objects[i]));
1222 dprintf (3, ("cm: set %Ix", obj));
1227 spin_and_switch (spin_count, (needs_checking == 0));
1232 int loh_alloc_set (uint8_t* obj)
1234 if (!gc_heap::cm_in_progress)
1240 dprintf (3, ("loh alloc: probing %Ix", obj));
1242 if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
1244 if (obj == rwp_object)
1247 spin_and_switch (spin_count, (obj != rwp_object));
1252 int cookie = find_free_index();
1256 alloc_objects[cookie] = obj;
1260 // GCToOSInterface::DebugBreak();
1263 dprintf (3, ("loh alloc: set %Ix at %d", obj, cookie));
1269 dprintf (3, ("loh alloc: setting %Ix will spin to acquire a free index", obj));
1270 spin_and_switch (spin_count, (find_free_index () != -1));
1277 dprintf (3, ("loh alloc: will spin on checking %Ix", obj));
1278 spin_and_switch (spin_count, (needs_checking == 0));
1283 void bgc_mark_done ()
1285 dprintf (3, ("cm: release lock on %Ix", (uint8_t *)rwp_object));
1289 void loh_alloc_done_with_index (int index)
1291 dprintf (3, ("loh alloc: release lock on %Ix based on %d", (uint8_t *)alloc_objects[index], index));
1292 assert ((index >= 0) && (index < max_pending_allocs));
1293 alloc_objects[index] = (uint8_t*)0;
1296 void loh_alloc_done (uint8_t* obj)
1298 #ifdef BACKGROUND_GC
1299 if (!gc_heap::cm_in_progress)
1304 for (int i = 0; i < max_pending_allocs; i++)
1306 if (alloc_objects [i] == obj)
1308 dprintf (3, ("loh alloc: release lock on %Ix at %d", (uint8_t *)alloc_objects[i], i));
1309 alloc_objects[i] = (uint8_t*)0;
1313 #endif //BACKGROUND_GC
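
// To summarize the protocol above: needs_checking is a one-word spin lock
// guarding the published state. The background marker publishes the object it
// is about to touch in rwp_object; a LOH allocator publishes its new object in
// a free alloc_objects slot. Each side, while holding needs_checking, looks
// for its object in the other side's published set and spins until it
// disappears, so a concurrent mark and a LOH allocation never operate on the
// same object at the same time.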
1317 // Note that this class was written assuming only synchronization between
1318 // one background GC thread and multiple user threads that might request
1319 // an FGC - it does not take into account what kind of locks those user
1320 // threads might be holding at the time (eg, there can only be one user
1321 // thread requesting an FGC because it needs to take gc_lock first), so
1322 // you'll see checks that may not be necessary if you take those conditions
1323 // into consideration.
1325 // With the introduction of Server Background GC we no longer use this
1326 // class to do synchronization between FGCs and BGC.
1327 class recursive_gc_sync
1329 static VOLATILE(int32_t) foreground_request_count;//initial state 0
1330 static VOLATILE(BOOL) gc_background_running; //initial state FALSE
1331 static VOLATILE(int32_t) foreground_count; // initial state 0;
1332 static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
1333 static GCEvent foreground_complete;//Auto Reset
1334 static GCEvent foreground_allowed;//Manual Reset
1336 static void begin_background();
1337 static void end_background();
1338 static void begin_foreground();
1339 static void end_foreground();
1340 BOOL allow_foreground ();
1342 static void shutdown();
1343 static BOOL background_running_p() {return gc_background_running;}
1346 VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
1347 VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
1348 VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
1349 VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
1350 GCEvent recursive_gc_sync::foreground_complete;//Auto Reset
1351 GCEvent recursive_gc_sync::foreground_allowed;//Manual Reset
1353 BOOL recursive_gc_sync::init ()
1355 foreground_request_count = 0;
1356 foreground_count = 0;
1357 gc_background_running = FALSE;
1358 foreground_gate = 0;
1360 if (!foreground_complete.CreateOSAutoEventNoThrow(FALSE))
1364 if (!foreground_allowed.CreateManualEventNoThrow(FALSE))
1376 void recursive_gc_sync::shutdown()
1378 if (foreground_complete.IsValid())
1379 foreground_complete.CloseEvent();
1380 if (foreground_allowed.IsValid())
1381 foreground_allowed.CloseEvent();
1384 void recursive_gc_sync::begin_background()
1386 dprintf (2, ("begin background"));
1387 foreground_request_count = 1;
1388 foreground_count = 1;
1389 foreground_allowed.Reset();
1390 gc_background_running = TRUE;
1392 void recursive_gc_sync::end_background()
1394 dprintf (2, ("end background"));
1395 gc_background_running = FALSE;
1396 foreground_gate = 1;
1397 foreground_allowed.Set();
1400 void recursive_gc_sync::begin_foreground()
1402 dprintf (2, ("begin_foreground"));
1404 bool cooperative_mode = false;
1405 if (gc_background_running)
1407 gc_heap::fire_alloc_wait_event_begin (awr_fgc_wait_for_bgc);
1408 gc_heap::alloc_wait_event_p = TRUE;
1412 Interlocked::Increment (&foreground_request_count);
1415 dprintf(2, ("Waiting sync gc point"));
1416 assert (foreground_allowed.IsValid());
1417 assert (foreground_complete.IsValid());
1419 cooperative_mode = gc_heap::enable_preemptive ();
1421 foreground_allowed.Wait(INFINITE, FALSE);
1423 dprintf(2, ("Waiting sync gc point is done"));
1425 gc_heap::disable_preemptive (cooperative_mode);
1427 if (foreground_gate)
1429 Interlocked::Increment (&foreground_count);
1430 dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
1431 if (foreground_gate)
1433 gc_heap::settings.concurrent = FALSE;
1444 goto try_again_no_inc;
1449 void recursive_gc_sync::end_foreground()
1451 dprintf (2, ("end_foreground"));
1452 if (gc_background_running)
1454 Interlocked::Decrement (&foreground_request_count);
1455 dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
1456 if (Interlocked::Decrement (&foreground_count) == 0)
1458 //c_write ((BOOL*)&foreground_gate, 0);
1459 // TODO - couldn't make the syntax work with Volatile<T>
1460 foreground_gate = 0;
1461 if (foreground_count == 0)
1463 foreground_allowed.Reset ();
1464 dprintf(2, ("setting foreground complete event"));
1465 foreground_complete.Set();
1472 BOOL recursive_gc_sync::allow_foreground()
1474 assert (gc_heap::settings.concurrent);
1475 dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
1476 (int32_t)foreground_request_count, (int32_t)foreground_count));
1478 BOOL did_fgc = FALSE;
1480 //if we have suspended the EE, just return because
1481 //some thread could be waiting on this to proceed.
1482 if (!GCHeap::GcInProgress)
1484 //TODO BACKGROUND_GC This is to stress the concurrency between
1485 //background and foreground
1486 // gc_heap::disallow_new_allocation (0);
1488 //GCToOSInterface::YieldThread(0);
1491 if (foreground_request_count != 0)
1493 //foreground wants to run
1494 //save the important settings
1495 //TODO BACKGROUND_GC be more selective about the important settings.
1496 gc_mechanisms saved_settings = gc_heap::settings;
1500 //c_write ((BOOL*)&foreground_gate, 1);
1501 // TODO - couldn't make the syntax work with Volatile<T>
1502 foreground_gate = 1;
1503 foreground_allowed.Set ();
1504 foreground_complete.Wait (INFINITE, FALSE);
1505 }while (/*foreground_request_count ||*/ foreground_gate);
1507 assert (!foreground_gate);
1509 //restore the important settings
1510 gc_heap::settings = saved_settings;
1511 GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
1512 //the background GC shouldn't be using gc_high and gc_low
1513 //gc_low = lowest_address;
1514 //gc_high = highest_address;
1517 //TODO BACKGROUND_GC This is to stress the concurrency between
1518 //background and foreground
1519 // gc_heap::allow_new_allocation (0);
1523 dprintf (100, ("leave allow_foreground"));
1524 assert (gc_heap::settings.concurrent);
1528 #endif //BACKGROUND_GC
1529 #endif //DACCESS_COMPILE
1532 #if defined(COUNT_CYCLES)
1534 #pragma warning(disable:4035)
1538 unsigned GetCycleCount32() // enough for about 40 seconds
1546 #pragma warning(default:4035)
1548 #endif //COUNT_CYCLES
1551 int mark_time, plan_time, sweep_time, reloc_time, compact_time;
1554 #ifndef MULTIPLE_HEAPS
1556 #endif // MULTIPLE_HEAPS
1558 void reset_memory (uint8_t* o, size_t sizeo);
1562 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1563 static bool virtual_alloc_hardware_write_watch = false;
1564 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1566 static bool hardware_write_watch_capability = false;
1568 #ifndef DACCESS_COMPILE
1570 //check if the write watch APIs are supported.
1572 void hardware_write_watch_api_supported()
1574 if (GCToOSInterface::SupportsWriteWatch())
1576 hardware_write_watch_capability = true;
1577 dprintf (2, ("WriteWatch supported"));
1581 dprintf (2,("WriteWatch not supported"));
1585 #endif //!DACCESS_COMPILE
1587 inline bool can_use_hardware_write_watch()
1589 return hardware_write_watch_capability;
1592 inline bool can_use_write_watch_for_gc_heap()
1594 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1596 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1597 return can_use_hardware_write_watch();
1598 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1601 inline bool can_use_write_watch_for_card_table()
1603 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
1606 return can_use_hardware_write_watch();
1611 #define mem_reserve (MEM_RESERVE)
1612 #endif //WRITE_WATCH
1614 //check if the low memory notification is supported
1616 #ifndef DACCESS_COMPILE
1618 void WaitLongerNoInstru (int i)
1620 // every 8th attempt:
1621 bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
1623 // if we're waiting for gc to finish, we should block immediately
1624 if (g_fSuspensionPending == 0)
1626 if (g_num_processors > 1)
1628 YieldProcessor(); // indicate to the processor that we are spinning
1630 GCToOSInterface::YieldThread (0);
1632 GCToOSInterface::Sleep (5);
1635 GCToOSInterface::Sleep (5);
1638 // If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
1639 // or with no Thread object, in order to force a task to yield or to trigger a GC.
1640 // It is important that the thread actually waits for the GC to finish; otherwise
1641 // it sits in a tight loop. If the thread has high priority, the perf is going to be very BAD.
1645 // In debug builds, all enter_spin_lock operations go through this code. If a GC has
1646 // started, it is important to block until the GC thread calls set_gc_done (since it is
1647 // guaranteed to have cleared g_TrapReturningThreads by this point). This avoids livelock
1648 // conditions which can otherwise occur if threads are allowed to spin in this function
1649 // (and therefore starve the GC thread) between the point when the GC thread sets the
1650 // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads.
1651 if (gc_heap::gc_started)
1653 gc_heap::wait_for_gc_done();
1656 GCToEEInterface::DisablePreemptiveGC();
1658 else if (g_fSuspensionPending > 0)
1660 g_theGCHeap->WaitUntilGCComplete();
1665 static void safe_switch_to_thread()
1667 bool cooperative_mode = gc_heap::enable_preemptive();
1669 GCToOSInterface::YieldThread(0);
1671 gc_heap::disable_preemptive(cooperative_mode);
1675 // We need the following methods to have volatile arguments, so that they can accept
1676 // raw pointers in addition to the results of the & operator on Volatile<T>.
1679 static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
1683 if (Interlocked::CompareExchange(lock, 0, -1) >= 0)
1686 while (VolatileLoad(lock) >= 0)
1688 if ((++i & 7) && !IsGCInProgress())
1690 if (g_num_processors > 1)
1692 #ifndef MULTIPLE_HEAPS
1693 int spin_count = 32 * yp_spin_count_unit;
1694 #else //!MULTIPLE_HEAPS
1695 int spin_count = yp_spin_count_unit;
1696 #endif //!MULTIPLE_HEAPS
1697 for (int j = 0; j < spin_count; j++)
1699 if (VolatileLoad(lock) < 0 || IsGCInProgress())
1701 YieldProcessor(); // indicate to the processor that we are spinning
1703 if (VolatileLoad(lock) >= 0 && !IsGCInProgress())
1705 safe_switch_to_thread();
1710 safe_switch_to_thread();
1715 WaitLongerNoInstru(i);
1723 static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
1725 return (Interlocked::CompareExchange(&*lock, 0, -1) < 0);
1729 static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
1731 VolatileStore<int32_t>((int32_t*)lock, -1);
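
// Convention used by these spin locks: -1 means free, 0 means held, so
// CompareExchange (lock, 0, -1) tests for "free" and acquires in one step,
// and release is a plain volatile store of -1. A minimal usage sketch:
//
//   VOLATILE(int32_t) lock = -1;          // starts free
//   enter_spin_lock_noinstru (&lock);     // spins until the CAS succeeds
//   /* critical section */
//   leave_spin_lock_noinstru (&lock);     // stores -1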
1737 static void enter_spin_lock(GCSpinLock *pSpinLock)
1739 enter_spin_lock_noinstru(&pSpinLock->lock);
1740 assert (pSpinLock->holding_thread == (Thread*)-1);
1741 pSpinLock->holding_thread = GCToEEInterface::GetThread();
1745 static BOOL try_enter_spin_lock(GCSpinLock *pSpinLock)
1747 BOOL ret = try_enter_spin_lock_noinstru(&pSpinLock->lock);
1749 pSpinLock->holding_thread = GCToEEInterface::GetThread();
1754 static void leave_spin_lock(GCSpinLock *pSpinLock)
1756 bool gc_thread_p = GCToEEInterface::WasCurrentThreadCreatedByGC();
1757 // _ASSERTE((pSpinLock->holding_thread == GCToEEInterface::GetThread()) || gc_thread_p || pSpinLock->released_by_gc_p);
1758 pSpinLock->released_by_gc_p = gc_thread_p;
1759 pSpinLock->holding_thread = (Thread*) -1;
1760 if (pSpinLock->lock != -1)
1761 leave_spin_lock_noinstru(&pSpinLock->lock);
1764 #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) \
1765 _ASSERTE((pSpinLock)->holding_thread == GCToEEInterface::GetThread());
1767 #define ASSERT_NOT_HOLDING_SPIN_LOCK(pSpinLock) \
1768 _ASSERTE((pSpinLock)->holding_thread != GCToEEInterface::GetThread());
1772 //In the concurrent version, the Enable/DisablePreemptiveGC is optional because
1773 //the gc thread calls WaitLonger.
1774 void WaitLonger (int i
1775 #ifdef SYNCHRONIZATION_STATS
1776 , GCSpinLock* spin_lock
1777 #endif //SYNCHRONIZATION_STATS
1780 #ifdef SYNCHRONIZATION_STATS
1781 (spin_lock->num_wait_longer)++;
1782 #endif //SYNCHRONIZATION_STATS
1784 // every 8th attempt:
1785 bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
1788 // if we're waiting for gc to finish, we should block immediately
1789 if (!gc_heap::gc_started)
1791 #ifdef SYNCHRONIZATION_STATS
1792 (spin_lock->num_switch_thread_w)++;
1793 #endif //SYNCHRONIZATION_STATS
1794 if (g_num_processors > 1)
1796 YieldProcessor(); // indicate to the processor that we are spinning
1798 GCToOSInterface::YieldThread (0);
1800 GCToOSInterface::Sleep (5);
1803 GCToOSInterface::Sleep (5);
1806 // If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
1807 // or with no Thread object, in order to force a task to yield or to trigger a GC.
1808 // It is important that the thread actually waits for the GC to finish; otherwise
1809 // it sits in a tight loop. If the thread has high priority, the perf is going to be very BAD.
1810 if (gc_heap::gc_started)
1812 gc_heap::wait_for_gc_done();
1817 #ifdef SYNCHRONIZATION_STATS
1818 (spin_lock->num_disable_preemptive_w)++;
1819 #endif //SYNCHRONIZATION_STATS
1820 GCToEEInterface::DisablePreemptiveGC();
1825 static void enter_spin_lock (GCSpinLock* spin_lock)
1829 if (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) >= 0)
1832 while (spin_lock->lock >= 0)
1834 if ((++i & 7) && !gc_heap::gc_started)
1836 if (g_num_processors > 1)
1838 #ifndef MULTIPLE_HEAPS
1839 int spin_count = 32 * yp_spin_count_unit;
1840 #else //!MULTIPLE_HEAPS
1841 int spin_count = yp_spin_count_unit;
1842 #endif //!MULTIPLE_HEAPS
1843 for (int j = 0; j < spin_count; j++)
1845 if (spin_lock->lock < 0 || gc_heap::gc_started)
1847 YieldProcessor(); // indicate to the processor that we are spinning
1849 if (spin_lock->lock >= 0 && !gc_heap::gc_started)
1851 #ifdef SYNCHRONIZATION_STATS
1852 (spin_lock->num_switch_thread)++;
1853 #endif //SYNCHRONIZATION_STATS
1854 bool cooperative_mode = gc_heap::enable_preemptive ();
1856 GCToOSInterface::YieldThread(0);
1858 gc_heap::disable_preemptive (cooperative_mode);
1862 GCToOSInterface::YieldThread(0);
1867 #ifdef SYNCHRONIZATION_STATS
1869 #endif //SYNCHRONIZATION_STATS
1877 inline BOOL try_enter_spin_lock(GCSpinLock* spin_lock)
1879 return (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) < 0);
1883 static void leave_spin_lock (GCSpinLock * spin_lock)
1885 spin_lock->lock = -1;
1888 #define ASSERT_HOLDING_SPIN_LOCK(pSpinLock)
1892 bool gc_heap::enable_preemptive ()
1894 return GCToEEInterface::EnablePreemptiveGC();
1897 void gc_heap::disable_preemptive (bool restore_cooperative)
1899 if (restore_cooperative)
1901 GCToEEInterface::DisablePreemptiveGC();
1905 #endif // !DACCESS_COMPILE
1907 typedef void ** PTR_PTR;
1908 //This function clears a piece of memory
1909 // size has to be pointer-size aligned (see the asserts below)
1912 void memclr ( uint8_t* mem, size_t size)
1914 dprintf (3, ("MEMCLR: %Ix, %d", mem, size));
1915 assert ((size & (sizeof(PTR_PTR)-1)) == 0);
1916 assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
1919 // The compiler will recognize this pattern and replace it with a memset call. We might as well just call
1920 // memset directly to make it obvious what's going on.
1921 PTR_PTR m = (PTR_PTR) mem;
1922 for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
1926 memset (mem, 0, size);
1929 void memcopy (uint8_t* dmem, uint8_t* smem, size_t size)
1931 const size_t sz4ptr = sizeof(PTR_PTR)*4;
1932 const size_t sz2ptr = sizeof(PTR_PTR)*2;
1933 const size_t sz1ptr = sizeof(PTR_PTR)*1;
1935 // size must be a multiple of the pointer size
1936 assert ((size & (sizeof (PTR_PTR)-1)) == 0);
1937 assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
1939 // copy in groups of four pointer sized things at a time
1944 ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1945 ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
1946 ((PTR_PTR)dmem)[2] = ((PTR_PTR)smem)[2];
1947 ((PTR_PTR)dmem)[3] = ((PTR_PTR)smem)[3];
1951 while ((size -= sz4ptr) >= sz4ptr);
1954 // still two pointer sized things or more left to copy?
1957 ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
1958 ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
1963 // still one pointer sized thing left to copy?
1966 ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
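
// Worked example: on a 64-bit build a 40-byte copy (five pointers) takes one
// pass through the 4-pointer loop (32 bytes), skips the 2-pointer step since
// only 8 bytes remain, and finishes with the single-pointer copy above.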
1974 ptrdiff_t round_down (ptrdiff_t add, int pitch)
1976 return ((add / pitch) * pitch);
1979 #if defined(FEATURE_STRUCTALIGN) && defined(RESPECT_LARGE_ALIGNMENT)
1980 // FEATURE_STRUCTALIGN allows the compiler to dictate the alignment,
1981 // i.e, if a larger alignment matters or is beneficial, the compiler
1982 // generated info tells us so. RESPECT_LARGE_ALIGNMENT is just the
1983 // converse - it's a heuristic for the GC to use a larger alignment.
1984 #error FEATURE_STRUCTALIGN should imply !RESPECT_LARGE_ALIGNMENT
1987 #if defined(FEATURE_STRUCTALIGN) && defined(FEATURE_LOH_COMPACTION)
1988 #error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive
1991 #if defined(GROWABLE_SEG_MAPPING_TABLE) && !defined(SEG_MAPPING_TABLE)
1992 #error if GROWABLE_SEG_MAPPING_TABLE is defined, SEG_MAPPING_TABLE must be defined
1996 BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
1998 #ifdef RESPECT_LARGE_ALIGNMENT
1999 return ((((size_t)p1 ^ (size_t)p2) & 7) == 0);
2001 UNREFERENCED_PARAMETER(p1);
2002 UNREFERENCED_PARAMETER(p2);
2004 #endif //RESPECT_LARGE_ALIGNMENT
2008 size_t switch_alignment_size (BOOL already_padded_p)
2010 if (already_padded_p)
2011 return DATA_ALIGNMENT;
2013 return (Align (min_obj_size) +((Align (min_obj_size)&DATA_ALIGNMENT)^DATA_ALIGNMENT));
2017 #ifdef FEATURE_STRUCTALIGN
2018 void set_node_aligninfo (uint8_t *node, int requiredAlignment, ptrdiff_t pad);
2019 void clear_node_aligninfo (uint8_t *node);
2020 #else // FEATURE_STRUCTALIGN
2021 #define node_realigned(node) (((plug_and_reloc*)(node))[-1].reloc & 1)
2022 void set_node_realigned (uint8_t* node);
2023 void clear_node_realigned(uint8_t* node);
2024 #endif // FEATURE_STRUCTALIGN
2027 size_t AlignQword (size_t nbytes)
2029 #ifdef FEATURE_STRUCTALIGN
2030 // This function is used to align everything on the large object
2031 // heap to an 8-byte boundary, to reduce the number of unaligned
2032 // accesses to (say) arrays of doubles. With FEATURE_STRUCTALIGN,
2033 // the compiler dictates the optimal alignment instead of having
2034 // a heuristic in the GC.
2035 return Align (nbytes);
2036 #else // FEATURE_STRUCTALIGN
2037 return (nbytes + 7) & ~7;
2038 #endif // FEATURE_STRUCTALIGN
2042 BOOL Aligned (size_t n)
2044 return (n & ALIGNCONST) == 0;
2047 #define OBJECT_ALIGNMENT_OFFSET (sizeof(MethodTable *))
2049 #ifdef FEATURE_STRUCTALIGN
2050 #define MAX_STRUCTALIGN OS_PAGE_SIZE
2051 #else // FEATURE_STRUCTALIGN
2052 #define MAX_STRUCTALIGN 0
2053 #endif // FEATURE_STRUCTALIGN
2055 #ifdef FEATURE_STRUCTALIGN
2057 ptrdiff_t AdjustmentForMinPadSize(ptrdiff_t pad, int requiredAlignment)
2059 // The resulting alignpad must be either 0 or at least min_obj_size.
2060 // Note that by computing the following difference on unsigned types,
2061 // we can do the range check 0 < alignpad < min_obj_size with a
2062 // single conditional branch.
2063 if ((size_t)(pad - DATA_ALIGNMENT) < Align (min_obj_size) - DATA_ALIGNMENT)
2065 return requiredAlignment;
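
// Worked example of the unsigned range check above, assuming a 64-bit build
// with DATA_ALIGNMENT == 8 and Align (min_obj_size) == 24: pad == 0 wraps
// around to a huge unsigned value (false); pad == 8 or 16 gives 0 or 8 < 16
// (true - too small to hold a padding object, so grow the pad); pad >= 24
// gives >= 16 (false - already big enough).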
2071 uint8_t* StructAlign (uint8_t* origPtr, int requiredAlignment, ptrdiff_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
2073 // required alignment must be a power of two
2074 _ASSERTE(((size_t)origPtr & ALIGNCONST) == 0);
2075 _ASSERTE(((requiredAlignment - 1) & requiredAlignment) == 0);
2076 _ASSERTE(requiredAlignment >= sizeof(void *));
2077 _ASSERTE(requiredAlignment <= MAX_STRUCTALIGN);
2079 // When this method is invoked for individual objects (i.e., alignmentOffset
2080 // is just the size of the PostHeader), what needs to be aligned when
2081 // we're done is the pointer to the payload of the object (which means
2082 // the actual resulting object pointer is typically not aligned).
2084 uint8_t* result = (uint8_t*)Align ((size_t)origPtr + alignmentOffset, requiredAlignment-1) - alignmentOffset;
2085 ptrdiff_t alignpad = result - origPtr;
2087 return result + AdjustmentForMinPadSize (alignpad, requiredAlignment);
2091 ptrdiff_t ComputeStructAlignPad (uint8_t* plug, int requiredAlignment, size_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
2093 return StructAlign (plug, requiredAlignment, alignmentOffset) - plug;
2096 BOOL IsStructAligned (uint8_t *ptr, int requiredAlignment)
2098 return StructAlign (ptr, requiredAlignment) == ptr;
2102 ptrdiff_t ComputeMaxStructAlignPad (int requiredAlignment)
2104 if (requiredAlignment == DATA_ALIGNMENT)
2106 // Since a non-zero alignment padding cannot be less than min_obj_size (so we can fit the
2107 // alignment padding object), the worst-case alignment padding is correspondingly larger
2108 // than the required alignment.
2109 return requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT;
2113 ptrdiff_t ComputeMaxStructAlignPadLarge (int requiredAlignment)
2115 if (requiredAlignment <= get_alignment_constant (TRUE)+1)
2117 // This is the same as ComputeMaxStructAlignPad, except that in addition to leaving space
2118 // for padding before the actual object, it also leaves space for filling a gap after the
2119 // actual object. This is needed on the large object heap, as the outer allocation functions
2120 // don't operate on an allocation context (which would have left space for the final gap).
2121 return requiredAlignment + Align (min_obj_size) * 2 - DATA_ALIGNMENT;
2124 uint8_t* gc_heap::pad_for_alignment (uint8_t* newAlloc, int requiredAlignment, size_t size, alloc_context* acontext)
2126 uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
2127 if (alignedPtr != newAlloc) {
2128 make_unused_array (newAlloc, alignedPtr - newAlloc);
2130 acontext->alloc_ptr = alignedPtr + Align (size);
2134 uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size)
2136 uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
2137 if (alignedPtr != newAlloc) {
2138 make_unused_array (newAlloc, alignedPtr - newAlloc);
2140 if (alignedPtr < newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment)) {
2141 make_unused_array (alignedPtr + AlignQword (size), newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment) - alignedPtr);
2145 #else // FEATURE_STRUCTALIGN
2146 #define ComputeMaxStructAlignPad(requiredAlignment) 0
2147 #define ComputeMaxStructAlignPadLarge(requiredAlignment) 0
2148 #endif // FEATURE_STRUCTALIGN
2150 //CLR_SIZE is the maximum number of bytes from gen0 that is set to 0 in one chunk
2152 #define CLR_SIZE ((size_t)(8*1024))
2154 #define CLR_SIZE ((size_t)(8*1024))
2157 #define END_SPACE_AFTER_GC (loh_size_threshold + MAX_STRUCTALIGN)
2159 #ifdef BACKGROUND_GC
2160 #define SEGMENT_INITIAL_COMMIT (2*OS_PAGE_SIZE)
2162 #define SEGMENT_INITIAL_COMMIT (OS_PAGE_SIZE)
2163 #endif //BACKGROUND_GC
2165 // This is always a power of 2.
2166 const size_t min_segment_size_hard_limit = 1024*1024*16;
2172 #define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024))
2173 #define LHEAP_ALLOC ((size_t)(1024*1024*256))
2177 #define INITIAL_ALLOC ((size_t)(1024*1024*64))
2178 #define LHEAP_ALLOC ((size_t)(1024*1024*32))
2186 #define INITIAL_ALLOC ((size_t)(1024*1024*256))
2187 #define LHEAP_ALLOC ((size_t)(1024*1024*128))
2191 #define INITIAL_ALLOC ((size_t)(1024*1024*16))
2192 #define LHEAP_ALLOC ((size_t)(1024*1024*16))
2198 //amount in bytes of the etw allocation tick
2199 const size_t etw_allocation_tick = 100*1024;
2201 const size_t low_latency_alloc = 256*1024;
2203 const size_t fgn_check_quantum = 2*1024*1024;
2206 const int max_snoop_level = 128;
2211 //threshold of heap size to turn on card bundles.
2212 #define SH_TH_CARD_BUNDLE (40*1024*1024)
2213 #define MH_TH_CARD_BUNDLE (180*1024*1024)
2214 #endif //CARD_BUNDLE
2216 #define GC_EPHEMERAL_DECOMMIT_TIMEOUT 5000
2219 size_t align_on_page (size_t add)
2221 return ((add + OS_PAGE_SIZE - 1) & ~((size_t)OS_PAGE_SIZE - 1));
2225 uint8_t* align_on_page (uint8_t* add)
2227 return (uint8_t*)align_on_page ((size_t) add);
2231 size_t align_lower_page (size_t add)
2233 return (add & ~((size_t)OS_PAGE_SIZE - 1));
2237 uint8_t* align_lower_page (uint8_t* add)
2239 return (uint8_t*)align_lower_page ((size_t)add);
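
// Example, assuming OS_PAGE_SIZE == 0x1000: align_on_page (0x1234) == 0x2000
// and align_lower_page (0x1234) == 0x1000; page-aligned inputs come back
// unchanged.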
2243 size_t align_write_watch_lower_page (size_t add)
2245 return (add & ~(WRITE_WATCH_UNIT_SIZE - 1));
2249 uint8_t* align_write_watch_lower_page (uint8_t* add)
2251 return (uint8_t*)align_write_watch_lower_page ((size_t)add);
2256 BOOL power_of_two_p (size_t integer)
2258 return !(integer & (integer-1));
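
// Note that with this bit trick power_of_two_p (0) also returns TRUE, since
// 0 & (0-1) == 0; callers must treat zero separately if it matters.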
2262 BOOL oddp (size_t integer)
2264 return (integer & 1) != 0;
2267 // we only ever use this for WORDs.
2268 size_t logcount (size_t word)
2270 //counts the number of set bits in a 16-bit word.
2271 assert (word < 0x10000);
2273 count = (word & 0x5555) + ( (word >> 1 ) & 0x5555);
2274 count = (count & 0x3333) + ( (count >> 2) & 0x3333);
2275 count = (count & 0x0F0F) + ( (count >> 4) & 0x0F0F);
2276 count = (count & 0x00FF) + ( (count >> 8) & 0x00FF);
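    // For word == 0xFFFF the four steps above yield 0xAAAA (two bits set per
    // 2-bit field), 0x4444 (four per nibble), 0x0808 (eight per byte), and
    // finally 0x10 == 16, the population count.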
2280 #ifndef DACCESS_COMPILE
2282 void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
2284 WriteBarrierParameters args = {};
2285 args.operation = WriteBarrierOp::StompResize;
2286 args.is_runtime_suspended = is_runtime_suspended;
2287 args.requires_upper_bounds_check = requires_upper_bounds_check;
2289 args.card_table = g_gc_card_table;
2290 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
2291 args.card_bundle_table = g_gc_card_bundle_table;
2294 args.lowest_address = g_gc_lowest_address;
2295 args.highest_address = g_gc_highest_address;
2297 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
2298 if (SoftwareWriteWatch::IsEnabledForGCHeap())
2300 args.write_watch_table = g_gc_sw_ww_table;
2302 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
2304 GCToEEInterface::StompWriteBarrier(&args);
2307 void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
2309 WriteBarrierParameters args = {};
2310 args.operation = WriteBarrierOp::StompEphemeral;
2311 args.is_runtime_suspended = true;
2312 args.ephemeral_low = ephemeral_low;
2313 args.ephemeral_high = ephemeral_high;
2314 GCToEEInterface::StompWriteBarrier(&args);
2317 void stomp_write_barrier_initialize(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
2319 WriteBarrierParameters args = {};
2320 args.operation = WriteBarrierOp::Initialize;
2321 args.is_runtime_suspended = true;
2322 args.requires_upper_bounds_check = false;
2323 args.card_table = g_gc_card_table;
2325 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
2326 args.card_bundle_table = g_gc_card_bundle_table;
2329 args.lowest_address = g_gc_lowest_address;
2330 args.highest_address = g_gc_highest_address;
2331 args.ephemeral_low = ephemeral_low;
2332 args.ephemeral_high = ephemeral_high;
2333 GCToEEInterface::StompWriteBarrier(&args);
2336 #endif // DACCESS_COMPILE
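// Editorial sketch (not part of the original source): the stomp_* routines
// above repoint values that the JIT bakes into the generated write barrier:
// the card table base, the heap bounds, and the ephemeral range. As a mental
// model only, a store barrier consults roughly this shape of state; every
// name below is hypothetical and the card-marking granularity is simplified:
namespace write_barrier_sketch
{
    static uint8_t* s_ephemeral_low;   // stands in for the patched ephemeral_low
    static uint8_t* s_ephemeral_high;  // stands in for the patched ephemeral_high
    static uint8_t* s_lowest_address;  // heap lowest address
    static uint8_t* s_card_table;      // byte-per-card table (simplified)
    const size_t s_card_shift = 10;    // hypothetical 1KB cards

    inline void write_ref (uint8_t** slot, uint8_t* ref)
    {
        *slot = ref;
        // only stores of ephemeral pointers need to dirty a card
        if ((ref >= s_ephemeral_low) && (ref < s_ephemeral_high))
            s_card_table[((uint8_t*)slot - s_lowest_address) >> s_card_shift] = 0xFF;
    }
}
// "stomping" amounts to updating these patched values (the ephemeral bounds
// after a GC, or the table base after a resize) so that already compiled
// barriers keep filtering correctly.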
2338 //extract the low bits [0, bits) of a uint32_t
2339 #define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1))
2340 //extract the high bits [bits, 32) of a uint32_t
2341 #define highbits(wrd, bits) ((wrd) & ~((1 << (bits))-1))
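// Editorial sanity check (not part of the original source): the two masks
// partition a word, so lowbits (w, n) | highbits (w, n) == w. For example:
static_assert (lowbits (0xABCD, 4) == 0x000D, "low 4 bits");
static_assert (highbits (0xABCD, 4) == 0xABC0, "everything but the low 4 bits");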
2343 // Things we need to manually initialize:
2344 // gen0 min_size - based on cache
2345 // gen0/1 max_size - based on segment size
2346 static static_data static_data_table[latency_level_last - latency_level_first + 1][NUMBERGENERATIONS] =
2348 // latency_level_memory_footprint
2351 {0, 0, 40000, 0.5f, 9.0f, 20.0f, 1000, 1},
2353 {160*1024, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
2355 {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
2357 {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
2360 // latency_level_balanced
2364 #ifdef MULTIPLE_HEAPS
2368 #endif //MULTIPLE_HEAPS
2371 {256*1024, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
2373 {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
2375 {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
2382 class CObjectHeader;
2386 class c_synchronize;
2388 #ifdef FEATURE_PREMORTEM_FINALIZATION
2389 #ifndef DACCESS_COMPILE
2391 HRESULT AllocateCFinalize(CFinalize **pCFinalize);
2392 #endif //!DACCESS_COMPILE
2393 #endif // FEATURE_PREMORTEM_FINALIZATION
2395 uint8_t* tree_search (uint8_t* tree, uint8_t* old_address);
2398 #ifdef USE_INTROSORT
2399 #define _sort introsort::sort
2400 #else //USE_INTROSORT
2401 #define _sort qsort1
2402 void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
2403 #endif //USE_INTROSORT
2405 void* virtual_alloc (size_t size);
2406 void virtual_free (void* add, size_t size);
2408 /* per heap static initialization */
2410 #ifndef MULTIPLE_HEAPS
2411 uint32_t* gc_heap::mark_array;
2412 #endif //MULTIPLE_HEAPS
2416 uint8_t** gc_heap::g_mark_list;
2418 #ifdef PARALLEL_MARK_LIST_SORT
2419 uint8_t** gc_heap::g_mark_list_copy;
2420 #endif //PARALLEL_MARK_LIST_SORT
2422 size_t gc_heap::mark_list_size;
2425 #ifdef SEG_MAPPING_TABLE
2426 seg_mapping* seg_mapping_table;
2427 #endif //SEG_MAPPING_TABLE
2429 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
2430 sorted_table* gc_heap::seg_table;
2431 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
2433 #ifdef MULTIPLE_HEAPS
2434 GCEvent gc_heap::ee_suspend_event;
2435 size_t gc_heap::min_balance_threshold = 0;
2436 #endif //MULTIPLE_HEAPS
2438 VOLATILE(BOOL) gc_heap::gc_started;
2440 #ifdef MULTIPLE_HEAPS
2442 GCEvent gc_heap::gc_start_event;
2443 bool gc_heap::gc_thread_no_affinitize_p = false;
2444 uintptr_t process_mask = 0;
2446 int gc_heap::n_heaps;
2448 gc_heap** gc_heap::g_heaps;
2450 size_t* gc_heap::g_promoted;
2453 int* gc_heap::g_mark_stack_busy;
2457 #ifdef BACKGROUND_GC
2458 size_t* gc_heap::g_bpromoted;
2459 #endif //BACKGROUND_GC
2461 #else //MULTIPLE_HEAPS
2463 size_t gc_heap::g_promoted;
2465 #ifdef BACKGROUND_GC
2466 size_t gc_heap::g_bpromoted;
2467 #endif //BACKGROUND_GC
2469 #endif //MULTIPLE_HEAPS
2471 size_t gc_heap::reserved_memory = 0;
2472 size_t gc_heap::reserved_memory_limit = 0;
2473 BOOL gc_heap::g_low_memory_status;
2475 #ifndef DACCESS_COMPILE
2476 static gc_reason gc_trigger_reason = reason_empty;
2477 #endif //DACCESS_COMPILE
2479 gc_latency_level gc_heap::latency_level = latency_level_default;
2481 gc_mechanisms gc_heap::settings;
2483 gc_history_global gc_heap::gc_data_global;
2485 size_t gc_heap::gc_last_ephemeral_decommit_time = 0;
2487 size_t gc_heap::gc_gen0_desired_high;
2489 CLRCriticalSection gc_heap::check_commit_cs;
2491 size_t gc_heap::current_total_committed = 0;
2493 size_t gc_heap::current_total_committed_bookkeeping = 0;
2496 double gc_heap::short_plugs_pad_ratio = 0;
2497 #endif //SHORT_PLUGS
2500 #define MAX_ALLOWED_MEM_LOAD 85
2502 // consider putting this in dynamic data -
2503 // we may want different values for workstation and server GC.
2505 #define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024)
2507 size_t gc_heap::youngest_gen_desired_th;
2510 uint32_t gc_heap::last_gc_memory_load = 0;
2512 size_t gc_heap::last_gc_heap_size = 0;
2514 size_t gc_heap::last_gc_fragmentation = 0;
2516 uint64_t gc_heap::mem_one_percent = 0;
2518 uint32_t gc_heap::high_memory_load_th = 0;
2520 uint32_t gc_heap::m_high_memory_load_th;
2522 uint32_t gc_heap::v_high_memory_load_th;
2524 uint64_t gc_heap::total_physical_mem = 0;
2526 uint64_t gc_heap::entry_available_physical_mem = 0;
2528 size_t gc_heap::heap_hard_limit = 0;
2530 #ifdef BACKGROUND_GC
2531 GCEvent gc_heap::bgc_start_event;
2533 gc_mechanisms gc_heap::saved_bgc_settings;
2535 GCEvent gc_heap::background_gc_done_event;
2537 GCEvent gc_heap::ee_proceed_event;
2539 bool gc_heap::gc_can_use_concurrent = false;
2541 bool gc_heap::temp_disable_concurrent_p = false;
2543 uint32_t gc_heap::cm_in_progress = FALSE;
2545 BOOL gc_heap::dont_restart_ee_p = FALSE;
2547 BOOL gc_heap::keep_bgc_threads_p = FALSE;
2549 GCEvent gc_heap::bgc_threads_sync_event;
2551 BOOL gc_heap::do_ephemeral_gc_p = FALSE;
2553 BOOL gc_heap::do_concurrent_p = FALSE;
2555 size_t gc_heap::ephemeral_fgc_counts[max_generation];
2557 BOOL gc_heap::alloc_wait_event_p = FALSE;
2559 VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
2561 #endif //BACKGROUND_GC
2563 #ifndef MULTIPLE_HEAPS
2564 #ifdef SPINLOCK_HISTORY
2565 int gc_heap::spinlock_info_index = 0;
2566 spinlock_info gc_heap::last_spinlock_info[max_saved_spinlock_info + 8];
2567 #endif //SPINLOCK_HISTORY
2569 size_t gc_heap::fgn_last_alloc = 0;
2571 int gc_heap::generation_skip_ratio = 100;
2573 uint64_t gc_heap::loh_alloc_since_cg = 0;
2575 BOOL gc_heap::elevation_requested = FALSE;
2577 BOOL gc_heap::last_gc_before_oom = FALSE;
2579 BOOL gc_heap::sufficient_gen0_space_p = FALSE;
2581 #ifdef BACKGROUND_GC
2582 uint8_t* gc_heap::background_saved_lowest_address = 0;
2583 uint8_t* gc_heap::background_saved_highest_address = 0;
2584 uint8_t* gc_heap::next_sweep_obj = 0;
2585 uint8_t* gc_heap::current_sweep_pos = 0;
2586 exclusive_sync* gc_heap::bgc_alloc_lock;
2587 #endif //BACKGROUND_GC
2589 oom_history gc_heap::oom_info;
2591 fgm_history gc_heap::fgm_result;
2593 size_t gc_heap::allocated_since_last_gc = 0;
2595 BOOL gc_heap::ro_segments_in_range;
2597 size_t gc_heap::gen0_big_free_spaces = 0;
2599 uint8_t* gc_heap::ephemeral_low;
2601 uint8_t* gc_heap::ephemeral_high;
2603 uint8_t* gc_heap::lowest_address;
2605 uint8_t* gc_heap::highest_address;
2607 BOOL gc_heap::ephemeral_promotion;
2609 uint8_t* gc_heap::saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
2610 size_t gc_heap::saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];
2612 short* gc_heap::brick_table;
2614 uint32_t* gc_heap::card_table;
2617 uint32_t* gc_heap::card_bundle_table;
2618 #endif //CARD_BUNDLE
2620 uint8_t* gc_heap::gc_low;
2622 uint8_t* gc_heap::gc_high;
2624 uint8_t* gc_heap::demotion_low;
2626 uint8_t* gc_heap::demotion_high;
2628 BOOL gc_heap::demote_gen1_p = TRUE;
2630 uint8_t* gc_heap::last_gen1_pin_end;
2632 gen_to_condemn_tuning gc_heap::gen_to_condemn_reasons;
2634 size_t gc_heap::etw_allocation_running_amount[2];
2636 int gc_heap::gc_policy = 0;
2638 size_t gc_heap::allocation_running_time;
2640 size_t gc_heap::allocation_running_amount;
2642 heap_segment* gc_heap::ephemeral_heap_segment = 0;
2644 BOOL gc_heap::blocking_collection = FALSE;
2646 heap_segment* gc_heap::freeable_large_heap_segment = 0;
2648 size_t gc_heap::time_bgc_last = 0;
2650 size_t gc_heap::mark_stack_tos = 0;
2652 size_t gc_heap::mark_stack_bos = 0;
2654 size_t gc_heap::mark_stack_array_length = 0;
2656 mark* gc_heap::mark_stack_array = 0;
2658 #if defined (_DEBUG) && defined (VERIFY_HEAP)
2659 BOOL gc_heap::verify_pinned_queue_p = FALSE;
2660 #endif // defined (_DEBUG) && defined (VERIFY_HEAP)
2662 uint8_t* gc_heap::oldest_pinned_plug = 0;
2664 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
2665 size_t gc_heap::num_pinned_objects = 0;
2666 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
2668 #ifdef FEATURE_LOH_COMPACTION
2669 size_t gc_heap::loh_pinned_queue_tos = 0;
2671 size_t gc_heap::loh_pinned_queue_bos = 0;
2673 size_t gc_heap::loh_pinned_queue_length = 0;
2675 mark* gc_heap::loh_pinned_queue = 0;
2677 BOOL gc_heap::loh_compacted_p = FALSE;
2678 #endif //FEATURE_LOH_COMPACTION
2680 #ifdef BACKGROUND_GC
2682 EEThreadId gc_heap::bgc_thread_id;
2684 uint8_t* gc_heap::background_written_addresses [array_size+2];
2686 heap_segment* gc_heap::freeable_small_heap_segment = 0;
2688 size_t gc_heap::bgc_overflow_count = 0;
2690 size_t gc_heap::bgc_begin_loh_size = 0;
2691 size_t gc_heap::end_loh_size = 0;
2693 uint32_t gc_heap::bgc_alloc_spin_loh = 0;
2695 size_t gc_heap::bgc_loh_size_increased = 0;
2697 size_t gc_heap::bgc_loh_allocated_in_free = 0;
2699 size_t gc_heap::background_soh_alloc_count = 0;
2701 size_t gc_heap::background_loh_alloc_count = 0;
2703 uint8_t** gc_heap::background_mark_stack_tos = 0;
2705 uint8_t** gc_heap::background_mark_stack_array = 0;
2707 size_t gc_heap::background_mark_stack_array_length = 0;
2709 uint8_t* gc_heap::background_min_overflow_address =0;
2711 uint8_t* gc_heap::background_max_overflow_address =0;
2713 BOOL gc_heap::processed_soh_overflow_p = FALSE;
2715 uint8_t* gc_heap::background_min_soh_overflow_address =0;
2717 uint8_t* gc_heap::background_max_soh_overflow_address =0;
2719 heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0;
2721 uint8_t* gc_heap::saved_sweep_ephemeral_start = 0;
2723 heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0;
2725 Thread* gc_heap::bgc_thread = 0;
2727 BOOL gc_heap::expanded_in_fgc = FALSE;
2729 uint8_t** gc_heap::c_mark_list = 0;
2731 size_t gc_heap::c_mark_list_length = 0;
2733 size_t gc_heap::c_mark_list_index = 0;
2735 gc_history_per_heap gc_heap::bgc_data_per_heap;
2737 BOOL gc_heap::bgc_thread_running;
2739 CLRCriticalSection gc_heap::bgc_threads_timeout_cs;
2741 GCEvent gc_heap::gc_lh_block_event;
2743 #endif //BACKGROUND_GC
2746 uint8_t** gc_heap::mark_list;
2747 uint8_t** gc_heap::mark_list_index;
2748 uint8_t** gc_heap::mark_list_end;
2752 snoop_stats_data gc_heap::snoop_stat;
2753 #endif //SNOOP_STATS
2755 uint8_t* gc_heap::min_overflow_address = MAX_PTR;
2757 uint8_t* gc_heap::max_overflow_address = 0;
2759 uint8_t* gc_heap::shigh = 0;
2761 uint8_t* gc_heap::slow = MAX_PTR;
2763 size_t gc_heap::ordered_free_space_indices[MAX_NUM_BUCKETS];
2765 size_t gc_heap::saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
2767 size_t gc_heap::ordered_plug_indices[MAX_NUM_BUCKETS];
2769 size_t gc_heap::saved_ordered_plug_indices[MAX_NUM_BUCKETS];
2771 BOOL gc_heap::ordered_plug_indices_init = FALSE;
2773 BOOL gc_heap::use_bestfit = FALSE;
2775 uint8_t* gc_heap::bestfit_first_pin = 0;
2777 BOOL gc_heap::commit_end_of_seg = FALSE;
2779 size_t gc_heap::max_free_space_items = 0;
2781 size_t gc_heap::free_space_buckets = 0;
2783 size_t gc_heap::free_space_items = 0;
2785 int gc_heap::trimmed_free_space_index = 0;
2787 size_t gc_heap::total_ephemeral_plugs = 0;
2789 seg_free_spaces* gc_heap::bestfit_seg = 0;
2791 size_t gc_heap::total_ephemeral_size = 0;
2795 size_t gc_heap::internal_root_array_length = initial_internal_roots;
2797 uint8_t** gc_heap::internal_root_array = 0;
2799 size_t gc_heap::internal_root_array_index = 0;
2801 BOOL gc_heap::heap_analyze_success = TRUE;
2803 uint8_t* gc_heap::current_obj = 0;
2804 size_t gc_heap::current_obj_size = 0;
2806 #endif //HEAP_ANALYZE
2808 #ifdef GC_CONFIG_DRIVEN
2809 size_t gc_heap::interesting_data_per_gc[max_idp_count];
2810 //size_t gc_heap::interesting_data_per_heap[max_idp_count];
2811 //size_t gc_heap::interesting_mechanisms_per_heap[max_im_count];
2812 #endif //GC_CONFIG_DRIVEN
2813 #endif //MULTIPLE_HEAPS
2815 no_gc_region_info gc_heap::current_no_gc_region_info;
2816 BOOL gc_heap::proceed_with_gc_p = FALSE;
2817 GCSpinLock gc_heap::gc_lock;
2819 size_t gc_heap::eph_gen_starts_size = 0;
2820 heap_segment* gc_heap::segment_standby_list;
2821 size_t gc_heap::last_gc_index = 0;
2822 #ifdef SEG_MAPPING_TABLE
2823 size_t gc_heap::min_segment_size = 0;
2824 size_t gc_heap::min_segment_size_shr = 0;
2825 #endif //SEG_MAPPING_TABLE
2826 size_t gc_heap::soh_segment_size = 0;
2827 size_t gc_heap::min_loh_segment_size = 0;
2828 size_t gc_heap::segment_info_size = 0;
2830 #ifdef GC_CONFIG_DRIVEN
2831 size_t gc_heap::time_init = 0;
2832 size_t gc_heap::time_since_init = 0;
2833 size_t gc_heap::compact_or_sweep_gcs[2];
2834 #endif //GC_CONFIG_DRIVEN
2836 #ifdef FEATURE_LOH_COMPACTION
2837 BOOL gc_heap::loh_compaction_always_p = FALSE;
2838 gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default;
2839 int gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY;
2841 #endif //FEATURE_LOH_COMPACTION
2843 GCEvent gc_heap::full_gc_approach_event;
2845 GCEvent gc_heap::full_gc_end_event;
2847 uint32_t gc_heap::fgn_maxgen_percent = 0;
2849 uint32_t gc_heap::fgn_loh_percent = 0;
2851 #ifdef BACKGROUND_GC
2852 BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE;
2853 #endif //BACKGROUND_GC
2855 VOLATILE(bool) gc_heap::full_gc_approach_event_set;
2857 size_t gc_heap::full_gc_counts[gc_type_max];
2859 bool gc_heap::maxgen_size_inc_p = false;
2861 BOOL gc_heap::should_expand_in_full_gc = FALSE;
2863 // Provisional mode related stuff.
2864 bool gc_heap::provisional_mode_triggered = false;
2865 bool gc_heap::pm_trigger_full_gc = false;
2866 size_t gc_heap::provisional_triggered_gc_count = 0;
2867 size_t gc_heap::provisional_off_gc_count = 0;
2868 size_t gc_heap::num_provisional_triggered = 0;
2869 bool gc_heap::pm_stress_on = false;
2872 BOOL gc_heap::heap_analyze_enabled = FALSE;
2873 #endif //HEAP_ANALYZE
2875 #ifndef MULTIPLE_HEAPS
2877 alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1];
2878 alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1];
2880 dynamic_data gc_heap::dynamic_data_table [NUMBERGENERATIONS+1];
2881 gc_history_per_heap gc_heap::gc_data_per_heap;
2882 size_t gc_heap::maxgen_pinned_compact_before_advance = 0;
2884 uint8_t* gc_heap::alloc_allocated = 0;
2886 size_t gc_heap::allocation_quantum = CLR_SIZE;
2888 GCSpinLock gc_heap::more_space_lock_soh;
2889 GCSpinLock gc_heap::more_space_lock_loh;
2890 VOLATILE(int32_t) gc_heap::loh_alloc_thread_count = 0;
2892 #ifdef SYNCHRONIZATION_STATS
2893 unsigned int gc_heap::good_suspension = 0;
2894 unsigned int gc_heap::bad_suspension = 0;
2895 uint64_t gc_heap::total_msl_acquire = 0;
2896 unsigned int gc_heap::num_msl_acquired = 0;
2897 unsigned int gc_heap::num_high_msl_acquire = 0;
2898 unsigned int gc_heap::num_low_msl_acquire = 0;
2899 #endif //SYNCHRONIZATION_STATS
2901 size_t gc_heap::alloc_contexts_used = 0;
2902 size_t gc_heap::soh_allocation_no_gc = 0;
2903 size_t gc_heap::loh_allocation_no_gc = 0;
2904 bool gc_heap::no_gc_oom_p = false;
2905 heap_segment* gc_heap::saved_loh_segment_no_gc = 0;
2907 #endif //MULTIPLE_HEAPS
2909 #ifndef MULTIPLE_HEAPS
2911 BOOL gc_heap::gen0_bricks_cleared = FALSE;
2914 int gc_heap::gen0_must_clear_bricks = 0;
2915 #endif //FFIND_OBJECT
2917 #ifdef FEATURE_PREMORTEM_FINALIZATION
2918 CFinalize* gc_heap::finalize_queue = 0;
2919 #endif // FEATURE_PREMORTEM_FINALIZATION
2921 generation gc_heap::generation_table [NUMBERGENERATIONS + 1];
2923 size_t gc_heap::interesting_data_per_heap[max_idp_count];
2925 size_t gc_heap::compact_reasons_per_heap[max_compact_reasons_count];
2927 size_t gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count];
2929 size_t gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
2931 #endif // MULTIPLE_HEAPS
2933 /* end of per heap static initialization */
2935 /* end of static initialization */
2937 #ifndef DACCESS_COMPILE
2939 void gen_to_condemn_tuning::print (int heap_num)
2942 dprintf (DT_LOG_0, ("condemned reasons (%d %d)", condemn_reasons_gen, condemn_reasons_condition));
2943 dprintf (DT_LOG_0, ("%s", record_condemn_reasons_gen_header));
2944 gc_condemn_reason_gen r_gen;
2945 for (int i = 0; i < gcrg_max; i++)
2947 r_gen = (gc_condemn_reason_gen)(i);
2948 str_reasons_gen[i * 2] = get_gen_char (get_gen (r_gen));
2950 dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_gen));
2952 dprintf (DT_LOG_0, ("%s", record_condemn_reasons_condition_header));
2953 gc_condemn_reason_condition r_condition;
2954 for (int i = 0; i < gcrc_max; i++)
2956 r_condition = (gc_condemn_reason_condition)(i);
2957 str_reasons_condition[i * 2] = get_condition_char (get_condition (r_condition));
2960 dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition));
2962 UNREFERENCED_PARAMETER(heap_num);
2966 void gc_generation_data::print (int heap_num, int gen_num)
2968 #if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
2969 dprintf (DT_LOG_0, ("[%2d]gen%d beg %Id fl %Id fo %Id end %Id fl %Id fo %Id in %Id p %Id np %Id alloc %Id",
2972 free_list_space_before, free_obj_space_before,
2974 free_list_space_after, free_obj_space_after,
2975 in, pinned_surv, npinned_surv,
2978 UNREFERENCED_PARAMETER(heap_num);
2979 UNREFERENCED_PARAMETER(gen_num);
2980 #endif //SIMPLE_DPRINTF && DT_LOG
2983 void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, uint32_t value)
2985 uint32_t* mechanism = &mechanisms[mechanism_per_heap];
2987 *mechanism |= mechanism_mask;
2988 *mechanism |= (1 << value);
2991 gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap];
2992 dprintf (DT_LOG_0, ("setting %s: %s",
2994 (descr->descr)[value]));
2998 void gc_history_per_heap::print()
3000 #if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
3001 for (int i = 0; i < (sizeof (gen_data)/sizeof (gc_generation_data)); i++)
3003 gen_data[i].print (heap_index, i);
3006 dprintf (DT_LOG_0, ("fla %Id flr %Id esa %Id ca %Id pa %Id paa %Id, rfle %d, ec %Id",
3007 maxgen_size_info.free_list_allocated,
3008 maxgen_size_info.free_list_rejected,
3009 maxgen_size_info.end_seg_allocated,
3010 maxgen_size_info.condemned_allocated,
3011 maxgen_size_info.pinned_allocated,
3012 maxgen_size_info.pinned_allocated_advance,
3013 maxgen_size_info.running_free_list_efficiency,
3014 extra_gen0_committed));
3017 gc_mechanism_descr* descr = 0;
3019 for (int i = 0; i < max_mechanism_per_heap; i++)
3021 mechanism = get_mechanism ((gc_mechanism_per_heap)i);
3025 descr = &gc_mechanisms_descr[(gc_mechanism_per_heap)i];
3026 dprintf (DT_LOG_0, ("[%2d]%s%s",
3029 (descr->descr)[mechanism]));
3032 #endif //SIMPLE_DPRINTF && DT_LOG
3035 void gc_history_global::print()
3038 char str_settings[64];
3039 memset (str_settings, '|', sizeof (char) * 64);
3040 str_settings[max_global_mechanisms_count*2] = 0;
3042 for (int i = 0; i < max_global_mechanisms_count; i++)
3044 str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 'Y' : 'N');
3047 dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|"));
3049 dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings));
3050 dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d",
3051 condemned_generation,
3052 str_gc_reasons[reason],
3053 str_gc_pause_modes[pause_mode],
3054 final_youngest_desired,
3055 gen0_reduction_count,
3060 void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num)
3062 maxgen_size_increase* maxgen_size_info = &(current_gc_data_per_heap->maxgen_size_info);
3063 FIRE_EVENT(GCPerHeapHistory_V3,
3064 (void *)(maxgen_size_info->free_list_allocated),
3065 (void *)(maxgen_size_info->free_list_rejected),
3066 (void *)(maxgen_size_info->end_seg_allocated),
3067 (void *)(maxgen_size_info->condemned_allocated),
3068 (void *)(maxgen_size_info->pinned_allocated),
3069 (void *)(maxgen_size_info->pinned_allocated_advance),
3070 maxgen_size_info->running_free_list_efficiency,
3071 current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(),
3072 current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(),
3073 current_gc_data_per_heap->mechanisms[gc_heap_compact],
3074 current_gc_data_per_heap->mechanisms[gc_heap_expand],
3075 current_gc_data_per_heap->heap_index,
3076 (void *)(current_gc_data_per_heap->extra_gen0_committed),
3077 (max_generation + 2),
3078 (uint32_t)(sizeof (gc_generation_data)),
3079 (void *)&(current_gc_data_per_heap->gen_data[0]));
3081 current_gc_data_per_heap->print();
3082 current_gc_data_per_heap->gen_to_condemn_reasons.print (heap_num);
3085 void gc_heap::fire_pevents()
3087 settings.record (&gc_data_global);
3088 gc_data_global.print();
3090 FIRE_EVENT(GCGlobalHeapHistory_V2,
3091 gc_data_global.final_youngest_desired,
3092 gc_data_global.num_heaps,
3093 gc_data_global.condemned_generation,
3094 gc_data_global.gen0_reduction_count,
3095 gc_data_global.reason,
3096 gc_data_global.global_mechanims_p,
3097 gc_data_global.pause_mode,
3098 gc_data_global.mem_pressure);
3100 #ifdef MULTIPLE_HEAPS
3101 for (int i = 0; i < gc_heap::n_heaps; i++)
3103 gc_heap* hp = gc_heap::g_heaps[i];
3104 gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
3105 fire_per_heap_hist_event (current_gc_data_per_heap, hp->heap_number);
3108 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
3109 fire_per_heap_hist_event (current_gc_data_per_heap, heap_number);
3114 gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp)
3120 case tuning_deciding_condemned_gen:
3121 case tuning_deciding_compaction:
3122 case tuning_deciding_expansion:
3123 case tuning_deciding_full_gc:
3125 ret = (!ephemeral_gen_fit_p (tp));
3128 case tuning_deciding_promote_ephemeral:
3130 size_t new_gen0size = approximate_new_allocation();
3131 ptrdiff_t plan_ephemeral_size = total_ephemeral_size;
3133 dprintf (GTC_LOG, ("h%d: plan eph size is %Id, new gen0 is %Id",
3134 heap_number, plan_ephemeral_size, new_gen0size));
3135 // If we were in a no_gc_region we could have allocated a larger than normal segment,
3136 // and the next seg we allocate will be a normal-sized seg, so if we can't fit the new
3137 // ephemeral generations there, do an ephemeral promotion.
3138 ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size));
3149 gc_heap::dt_high_frag_p (gc_tuning_point tp,
3157 case tuning_deciding_condemned_gen:
3159 dynamic_data* dd = dynamic_data_of (gen_number);
3160 float fragmentation_burden = 0;
3164 ret = (dd_fragmentation (dynamic_data_of (max_generation)) >= dd_max_size(dd));
3165 dprintf (GTC_LOG, ("h%d: frag is %Id, max size is %Id",
3166 heap_number, dd_fragmentation (dd), dd_max_size(dd)));
3170 #ifndef MULTIPLE_HEAPS
3171 if (gen_number == max_generation)
3173 float frag_ratio = (float)(dd_fragmentation (dynamic_data_of (max_generation))) / (float)generation_size (max_generation);
3174 if (frag_ratio > 0.65)
3176 dprintf (GTC_LOG, ("g2 FR: %d%%", (int)(frag_ratio*100)));
3180 #endif //!MULTIPLE_HEAPS
3181 size_t fr = generation_unusable_fragmentation (generation_of (gen_number));
3182 ret = (fr > dd_fragmentation_limit(dd));
3185 fragmentation_burden = (float)fr / generation_size (gen_number);
3186 ret = (fragmentation_burden > dd_v_fragmentation_burden_limit (dd));
3188 dprintf (GTC_LOG, ("h%d: gen%d, frag is %Id, alloc effi: %d%%, unusable frag is %Id, ratio is %d",
3189 heap_number, gen_number, dd_fragmentation (dd),
3190 (int)(100*generation_allocator_efficiency (generation_of (gen_number))),
3191 fr, (int)(fragmentation_burden*100)));
3203 gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
3209 case tuning_deciding_condemned_gen:
3211 if (gen_number == max_generation)
3213 size_t est_maxgen_free = estimated_reclaim (gen_number);
3215 uint32_t num_heaps = 1;
3216 #ifdef MULTIPLE_HEAPS
3217 num_heaps = gc_heap::n_heaps;
3218 #endif //MULTIPLE_HEAPS
3220 size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps);
3221 dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th));
3222 ret = (est_maxgen_free >= min_frag_th);
3238 // DTREVIEW: Right now we only estimate gen2 fragmentation.
3239 // on 64-bit though we should consider gen1 or even gen0 fragmentation as well.
3242 gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem)
3248 case tuning_deciding_condemned_gen:
3250 if (gen_number == max_generation)
3252 dynamic_data* dd = dynamic_data_of (gen_number);
3253 float est_frag_ratio = 0;
3254 if (dd_current_size (dd) == 0)
3258 else if ((dd_fragmentation (dd) == 0) || (dd_fragmentation (dd) + dd_current_size (dd) == 0))
3264 est_frag_ratio = (float)dd_fragmentation (dd) / (float)(dd_fragmentation (dd) + dd_current_size (dd));
3267 size_t est_frag = (dd_fragmentation (dd) + (size_t)((dd_desired_allocation (dd) - dd_new_allocation (dd)) * est_frag_ratio));
3268 dprintf (GTC_LOG, ("h%d: gen%d: current_size is %Id, frag is %Id, est_frag_ratio is %d%%, estimated frag is %Id",
3271 dd_current_size (dd),
3272 dd_fragmentation (dd),
3273 (int)(est_frag_ratio*100),
3276 uint32_t num_heaps = 1;
3278 #ifdef MULTIPLE_HEAPS
3279 num_heaps = gc_heap::n_heaps;
3280 #endif //MULTIPLE_HEAPS
3281 uint64_t min_frag_th = min_high_fragmentation_threshold(available_mem, num_heaps);
3282 //dprintf (GTC_LOG, ("h%d, min frag is %I64d", heap_number, min_frag_th));
3283 ret = (est_frag >= min_frag_th);
3300 gc_heap::dt_low_card_table_efficiency_p (gc_tuning_point tp)
3306 case tuning_deciding_condemned_gen:
3308 /* promote into max-generation if the card table has too many
3309 * generation faults besides the n -> 0 */
3311 ret = (generation_skip_ratio < 30);
3323 in_range_for_segment(uint8_t* add, heap_segment* seg)
3325 return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
3328 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
3329 // The array we allocate is organized as follows:
3330 // the 0th element holds the address of the last array we allocated;
3331 // the segment addresses start from the 1st element, which is
3332 // what buckets() returns.
3345 bk* buckets() { return (slots + 1); }
3346 uint8_t*& last_slot (bk* arr) { return arr[0].add; }
3349 static sorted_table* make_sorted_table ();
3350 BOOL insert (uint8_t* add, size_t val);
3351 size_t lookup (uint8_t*& add);
3352 void remove (uint8_t* add);
3354 void delete_sorted_table();
3355 void delete_old_slots();
3356 void enqueue_old_slot(bk* sl);
3357 BOOL ensure_space_for_insert();
3361 sorted_table::make_sorted_table ()
3365 // allocate one more bk to store the older slot address.
3366 sorted_table* res = (sorted_table*)new char [sizeof (sorted_table) + (size + 1) * sizeof (bk)];
3370 res->slots = (bk*)(res + 1);
3377 sorted_table::delete_sorted_table()
3379 if (slots != (bk*)(this+1))
3387 sorted_table::delete_old_slots()
3389 uint8_t* sl = (uint8_t*)old_slots;
3393 sl = last_slot ((bk*)sl);
3399 sorted_table::enqueue_old_slot(bk* sl)
3401 last_slot (sl) = (uint8_t*)old_slots;
3407 sorted_table::lookup (uint8_t*& add)
3409 ptrdiff_t high = (count-1);
3413 bk* buck = buckets();
3416 mid = ((low + high)/2);
3418 if (buck[ti].add > add)
3420 if ((ti > 0) && (buck[ti-1].add <= add))
3422 add = buck[ti-1].add;
3423 return buck[ti - 1].val;
3429 if (buck[ti+1].add > add)
3432 return buck[ti].val;
3442 sorted_table::ensure_space_for_insert()
3446 size = (size * 3)/2;
3447 assert((size * sizeof (bk)) > 0);
3448 bk* res = (bk*)new (nothrow) char [(size + 1) * sizeof (bk)];
3453 last_slot (res) = 0;
3454 memcpy (((bk*)res + 1), buckets(), count * sizeof (bk));
3455 bk* last_old_slots = slots;
3457 if (last_old_slots != (bk*)(this + 1))
3458 enqueue_old_slot (last_old_slots);
3464 sorted_table::insert (uint8_t* add, size_t val)
3466 //grow if no more room
3467 assert (count < size);
3470 ptrdiff_t high = (count-1);
3474 bk* buck = buckets();
3477 mid = ((low + high)/2);
3479 if (buck[ti].add > add)
3481 if ((ti == 0) || (buck[ti-1].add <= add))
3483 // found insertion point
3484 for (ptrdiff_t k = count; k > ti;k--)
3486 buck [k] = buck [k-1];
3497 if (buck[ti+1].add > add)
3499 //found the insertion point
3500 for (ptrdiff_t k = count; k > ti+1;k--)
3502 buck [k] = buck [k-1];
3504 buck[ti+1].add = add;
3505 buck[ti+1].val = val;
3517 sorted_table::remove (uint8_t* add)
3519 ptrdiff_t high = (count-1);
3523 bk* buck = buckets();
3526 mid = ((low + high)/2);
3528 if (buck[ti].add > add)
3530 if (buck[ti-1].add <= add)
3532 // found the guy to remove
3533 for (ptrdiff_t k = ti; k < count; k++)
3534 buck[k-1] = buck[k];
3542 if (buck[ti+1].add > add)
3544 // found the guy to remove
3545 for (ptrdiff_t k = ti+1; k < count; k++)
3546 buck[k-1] = buck[k];
3557 sorted_table::clear()
3560 buckets()[0].add = MAX_PTR;
3562 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
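// Editorial sketch (not part of the original source): lookup, insert, and
// remove above all share the same floor-style binary search: find the
// greatest entry whose address is <= the probe address. The search in
// isolation, over plain arrays instead of bk slots (hypothetical names):
inline ptrdiff_t example_floor_lookup (uint8_t* const* addrs, ptrdiff_t count, uint8_t* add)
{
    ptrdiff_t low = 0;
    ptrdiff_t high = count - 1;
    ptrdiff_t result = -1;   // -1 means no entry is <= add
    while (low <= high)
    {
        ptrdiff_t mid = (low + high) / 2;
        if (addrs[mid] <= add)
        {
            result = mid;    // candidate; a larger one may exist to the right
            low = mid + 1;
        }
        else
        {
            high = mid - 1;
        }
    }
    return result;
}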
3564 #ifdef SEG_MAPPING_TABLE
3565 #ifdef GROWABLE_SEG_MAPPING_TABLE
3567 uint8_t* align_on_segment (uint8_t* add)
3569 return (uint8_t*)((size_t)(add + (gc_heap::min_segment_size - 1)) & ~(gc_heap::min_segment_size - 1));
3573 uint8_t* align_lower_segment (uint8_t* add)
3575 return (uint8_t*)((size_t)(add) & ~(gc_heap::min_segment_size - 1));
3578 size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end)
3580 from = align_lower_segment (from);
3581 end = align_on_segment (end);
3582 dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr))));
3583 return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr);
3586 // for seg_mapping_table we want it to start from a pointer-sized address.
3588 size_t align_for_seg_mapping_table (size_t size)
3590 return ((size + (sizeof (uint8_t*) - 1)) &~ (sizeof (uint8_t*) - 1));
3594 size_t seg_mapping_word_of (uint8_t* add)
3596 return (size_t)add >> gc_heap::min_segment_size_shr;
3598 #else //GROWABLE_SEG_MAPPING_TABLE
3599 BOOL seg_mapping_table_init()
3602 uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024;
3604 uint64_t total_address_space = (uint64_t)4*1024*1024*1024;
3607 size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr);
3608 seg_mapping_table = new seg_mapping[num_entries];
3610 if (seg_mapping_table)
3612 memset (seg_mapping_table, 0, num_entries * sizeof (seg_mapping));
3613 dprintf (1, ("created %d entries for heap mapping (%Id bytes)",
3614 num_entries, (num_entries * sizeof (seg_mapping))));
3619 dprintf (1, ("failed to create %d entries for heap mapping (%Id bytes)",
3620 num_entries, (num_entries * sizeof (seg_mapping))));
3624 #endif //GROWABLE_SEG_MAPPING_TABLE
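// Editorial sketch (not part of the original source): because
// min_segment_size is a power of two, mapping an address to its
// seg_mapping_table slot is a single shift. With a hypothetical 4MB minimum
// segment (min_segment_size_shr == 22):
inline size_t example_seg_mapping_index (uint8_t* addr, size_t min_segment_size_shr)
{
    return (size_t)addr >> min_segment_size_shr;
}
// e.g. addresses 0x400000 through 0x7FFFFF all land in slot 1, so one table
// entry describes one min_segment_size-sized stripe of the address space.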
3626 #ifdef FEATURE_BASICFREEZE
3628 size_t ro_seg_begin_index (heap_segment* seg)
3630 size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3631 begin_index = max (begin_index, (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr);
3636 size_t ro_seg_end_index (heap_segment* seg)
3638 size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) >> gc_heap::min_segment_size_shr;
3639 end_index = min (end_index, (size_t)g_gc_highest_address >> gc_heap::min_segment_size_shr);
3643 void seg_mapping_table_add_ro_segment (heap_segment* seg)
3645 #ifdef GROWABLE_SEG_MAPPING_TABLE
3646 if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
3648 #endif //GROWABLE_SEG_MAPPING_TABLE
3650 for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++)
3651 seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry);
3654 void seg_mapping_table_remove_ro_segment (heap_segment* seg)
3656 UNREFERENCED_PARAMETER(seg);
3658 // POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves
3659 // to be a perf problem, we can search in the current ro segs and see if any lands in this range and only
3660 // remove the flag if none lands in this range.
3664 heap_segment* ro_segment_lookup (uint8_t* o)
3666 uint8_t* ro_seg_start = o;
3667 heap_segment* seg = (heap_segment*)gc_heap::seg_table->lookup (ro_seg_start);
3669 if (ro_seg_start && in_range_for_segment (o, seg))
3675 #endif //FEATURE_BASICFREEZE
3677 void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp)
3679 size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
3680 size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3681 seg_mapping* begin_entry = &seg_mapping_table[begin_index];
3682 size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
3683 seg_mapping* end_entry = &seg_mapping_table[end_index];
3685 dprintf (1, ("adding seg %Ix(%d)-%Ix(%d)",
3686 seg, begin_index, heap_segment_reserved (seg), end_index));
3688 dprintf (1, ("before add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix",
3689 begin_index, (seg_mapping_table[begin_index].boundary + 1),
3690 end_index, (seg_mapping_table[end_index].boundary + 1)));
3692 #ifdef MULTIPLE_HEAPS
3693 #ifdef SIMPLE_DPRINTF
3694 dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end %d: h0: %Ix(%d), h1: %Ix(%d)",
3695 begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
3696 (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
3697 end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
3698 (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
3699 #endif //SIMPLE_DPRINTF
3700 assert (end_entry->boundary == 0);
3701 assert (end_entry->h0 == 0);
3703 assert (begin_entry->h1 == 0);
3704 begin_entry->h1 = hp;
3706 UNREFERENCED_PARAMETER(hp);
3707 #endif //MULTIPLE_HEAPS
3709 end_entry->boundary = (uint8_t*)seg_end;
3711 dprintf (1, ("set entry %d seg1 and %d seg0 to %Ix", begin_index, end_index, seg));
3712 assert ((begin_entry->seg1 == 0) || ((size_t)(begin_entry->seg1) == ro_in_entry));
3713 begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) | (size_t)seg);
3714 end_entry->seg0 = seg;
3716 // for every entry in between we need to set its heap too.
3717 for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
3719 assert (seg_mapping_table[entry_index].boundary == 0);
3720 #ifdef MULTIPLE_HEAPS
3721 assert (seg_mapping_table[entry_index].h0 == 0);
3722 seg_mapping_table[entry_index].h1 = hp;
3723 #endif //MULTIPLE_HEAPS
3724 seg_mapping_table[entry_index].seg1 = seg;
3727 dprintf (1, ("after add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix",
3728 begin_index, (seg_mapping_table[begin_index].boundary + 1),
3729 end_index, (seg_mapping_table[end_index].boundary + 1)));
3730 #if defined(MULTIPLE_HEAPS) && defined(SIMPLE_DPRINTF)
3731 dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end: %d h0: %Ix(%d), h1: %Ix(%d)",
3732 begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
3733 (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
3734 end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
3735 (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
3736 #endif //MULTIPLE_HEAPS && SIMPLE_DPRINTF
3739 void gc_heap::seg_mapping_table_remove_segment (heap_segment* seg)
3741 size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
3742 size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
3743 seg_mapping* begin_entry = &seg_mapping_table[begin_index];
3744 size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
3745 seg_mapping* end_entry = &seg_mapping_table[end_index];
3746 dprintf (1, ("removing seg %Ix(%d)-%Ix(%d)",
3747 seg, begin_index, heap_segment_reserved (seg), end_index));
3749 assert (end_entry->boundary == (uint8_t*)seg_end);
3750 end_entry->boundary = 0;
3752 #ifdef MULTIPLE_HEAPS
3753 gc_heap* hp = heap_segment_heap (seg);
3754 assert (end_entry->h0 == hp);
3756 assert (begin_entry->h1 == hp);
3757 begin_entry->h1 = 0;
3758 #endif //MULTIPLE_HEAPS
3760 assert (begin_entry->seg1 != 0);
3761 begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) & ro_in_entry);
3762 end_entry->seg0 = 0;
3764 // for every entry in between we need to reset its heap too.
3765 for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
3767 assert (seg_mapping_table[entry_index].boundary == 0);
3768 #ifdef MULTIPLE_HEAPS
3769 assert (seg_mapping_table[entry_index].h0 == 0);
3770 assert (seg_mapping_table[entry_index].h1 == hp);
3771 seg_mapping_table[entry_index].h1 = 0;
3772 #endif //MULTIPLE_HEAPS
3773 seg_mapping_table[entry_index].seg1 = 0;
3776 dprintf (1, ("after remove: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix",
3777 begin_index, (seg_mapping_table[begin_index].boundary + 1),
3778 end_index, (seg_mapping_table[end_index].boundary + 1)));
3779 #ifdef MULTIPLE_HEAPS
3780 dprintf (1, ("begin %d: h0: %Ix, h1: %Ix; end: %d h0: %Ix, h1: %Ix",
3781 begin_index, (uint8_t*)(begin_entry->h0), (uint8_t*)(begin_entry->h1),
3782 end_index, (uint8_t*)(end_entry->h0), (uint8_t*)(end_entry->h1)));
3783 #endif //MULTIPLE_HEAPS
3786 #ifdef MULTIPLE_HEAPS
3788 gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
3790 size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
3791 seg_mapping* entry = &seg_mapping_table[index];
3793 gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0);
3795 dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix",
3796 o, index, (entry->boundary + 1),
3797 (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0),
3798 (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1)));
3801 heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
3802 #ifdef FEATURE_BASICFREEZE
3803 if ((size_t)seg & ro_in_entry)
3804 seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
3805 #endif //FEATURE_BASICFREEZE
3809 if (in_range_for_segment (o, seg))
3811 dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, seg, (uint8_t*)heap_segment_allocated (seg)));
3815 dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg",
3816 seg, (uint8_t*)heap_segment_allocated (seg), o));
3821 dprintf (2, ("could not find obj %Ix in any existing segments", o));
3828 gc_heap* seg_mapping_table_heap_of (uint8_t* o)
3830 #ifdef GROWABLE_SEG_MAPPING_TABLE
3831 if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3833 #endif //GROWABLE_SEG_MAPPING_TABLE
3835 return seg_mapping_table_heap_of_worker (o);
3838 gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
3840 #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
3841 if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3843 #endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
3845 return seg_mapping_table_heap_of_worker (o);
3847 #endif //MULTIPLE_HEAPS
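// Editorial sketch (not part of the original source): a seg_mapping entry can
// be shared by two segments, the one that ends inside the entry's stripe
// (seg0/h0) and the one that begins there (seg1/h1). entry->boundary records
// the last byte of the ending segment, so a single compare picks the side an
// address falls on, exactly as the lookups above and below do:
struct example_mapping_entry
{
    uint8_t* boundary;  // last byte of the segment that ends in this stripe
    void*    seg0;      // covers addresses <= boundary
    void*    seg1;      // covers addresses > boundary
};

inline void* example_seg_of (example_mapping_entry* entry, uint8_t* o)
{
    return (o > entry->boundary) ? entry->seg1 : entry->seg0;
}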
3849 // Only returns a valid seg if we can actually find o on the seg.
3850 heap_segment* seg_mapping_table_segment_of (uint8_t* o)
3852 #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
3853 if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
3854 #ifdef FEATURE_BASICFREEZE
3855 return ro_segment_lookup (o);
3858 #endif //FEATURE_BASICFREEZE
3859 #endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
3861 size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
3862 seg_mapping* entry = &seg_mapping_table[index];
3864 dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix",
3865 o, index, (entry->boundary + 1),
3866 (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1)));
3868 heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
3869 #ifdef FEATURE_BASICFREEZE
3870 if ((size_t)seg & ro_in_entry)
3871 seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
3872 #endif //FEATURE_BASICFREEZE
3876 if (in_range_for_segment (o, seg))
3878 dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg)));
3882 dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg, setting it to 0",
3883 (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg), o));
3889 dprintf (2, ("could not find obj %Ix in any existing segments", o));
3892 #ifdef FEATURE_BASICFREEZE
3893 // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro
3894 // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range. I.e., it had an
3895 // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression. However, at the moment, grow_brick_card_table does
3896 // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest)
3897 // range changes. We should probably go ahead and modify grow_brick_card_table and put back the
3898 // "&& (size_t)(entry->seg1) & ro_in_entry" here.
3901 seg = ro_segment_lookup (o);
3902 if (seg && !in_range_for_segment (o, seg))
3905 #endif //FEATURE_BASICFREEZE
3909 #endif //SEG_MAPPING_TABLE
3911 size_t gcard_of ( uint8_t*);
3913 #define memref(i) *(uint8_t**)(i)
3916 #define GC_MARKED (size_t)0x1
3917 #define slot(i, j) ((uint8_t**)(i))[j+1]
3919 #define free_object_base_size (plug_skew + sizeof(ArrayBase))
3921 class CObjectHeader : public Object
3925 #if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
3926 // The GC expects the following methods that are provided by the Object class in the CLR but not provided
3927 // by Redhawk's version of Object.
3928 uint32_t GetNumComponents()
3930 return ((ArrayBase *)this)->GetNumComponents();
3933 void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
3935 UNREFERENCED_PARAMETER(bVerifyNextHeader);
3940 MethodTable * pMT = GetMethodTable();
3942 _ASSERTE(pMT->SanityCheck());
3944 bool noRangeChecks =
3945 (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS;
3947 BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
3950 fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE);
3951 if (!fSmallObjectHeapPtr)
3952 fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this);
3954 _ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
3957 #ifdef FEATURE_STRUCTALIGN
3958 _ASSERTE(IsStructAligned((uint8_t *)this, GetMethodTable()->GetBaseAlignment()));
3959 #endif // FEATURE_STRUCTALIGN
3961 #ifdef FEATURE_64BIT_ALIGNMENT
3962 if (pMT->RequiresAlign8())
3964 _ASSERTE((((size_t)this) & 0x7) == (pMT->IsValueType() ? 4U : 0U));
3966 #endif // FEATURE_64BIT_ALIGNMENT
3969 if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
3970 g_theGCHeap->ValidateObjectMember(this);
3972 if (fSmallObjectHeapPtr)
3974 #ifdef FEATURE_BASICFREEZE
3975 _ASSERTE(!g_theGCHeap->IsLargeObject(pMT) || g_theGCHeap->IsInFrozenSegment(this));
3977 _ASSERTE(!g_theGCHeap->IsLargeObject(pMT));
3982 void ValidatePromote(ScanContext *sc, uint32_t flags)
3984 UNREFERENCED_PARAMETER(sc);
3985 UNREFERENCED_PARAMETER(flags);
3990 void ValidateHeap(Object *from, BOOL bDeep)
3992 UNREFERENCED_PARAMETER(from);
3994 Validate(bDeep, FALSE);
3997 #endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE
4001 // Header Status Information
4004 MethodTable *GetMethodTable() const
4006 return( (MethodTable *) (((size_t) RawGetMethodTable()) & (~(GC_MARKED))));
4011 RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | GC_MARKED));
4014 BOOL IsMarked() const
4016 return !!(((size_t)RawGetMethodTable()) & GC_MARKED);
4021 assert (!(gc_heap::settings.concurrent));
4022 GetHeader()->SetGCBit();
4025 BOOL IsPinned() const
4027 return !!((((CObjectHeader*)this)->GetHeader()->GetBits()) & BIT_SBLK_GC_RESERVE);
4032 RawSetMethodTable( GetMethodTable() );
4035 CGCDesc *GetSlotMap ()
4037 assert (GetMethodTable()->ContainsPointers());
4038 return CGCDesc::GetCGCDescFromMT(GetMethodTable());
4041 void SetFree(size_t size)
4043 assert (size >= free_object_base_size);
4045 assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size);
4046 assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1);
4048 RawSetMethodTable( g_gc_pFreeObjectMethodTable );
4050 size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()];
4051 *numComponentsPtr = size - free_object_base_size;
4053 //This introduces a bug in the free list management.
4054 //((void**) this)[-1] = 0; // clear the sync block,
4055 assert (*numComponentsPtr >= 0);
4056 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
4057 memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr);
4058 #endif //VERIFY_HEAP
4063 size_t size = free_object_base_size - plug_skew;
4065 // since we only need to clear 2 pointer-sized words, we do it manually
4066 PTR_PTR m = (PTR_PTR) this;
4067 for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
4071 BOOL IsFree () const
4073 return (GetMethodTable() == g_gc_pFreeObjectMethodTable);
4076 #ifdef FEATURE_STRUCTALIGN
4077 int GetRequiredAlignment () const
4079 return GetMethodTable()->GetRequiredAlignment();
4081 #endif // FEATURE_STRUCTALIGN
4083 BOOL ContainsPointers() const
4085 return GetMethodTable()->ContainsPointers();
4088 #ifdef COLLECTIBLE_CLASS
4089 BOOL Collectible() const
4091 return GetMethodTable()->Collectible();
4094 FORCEINLINE BOOL ContainsPointersOrCollectible() const
4096 MethodTable *pMethodTable = GetMethodTable();
4097 return (pMethodTable->ContainsPointers() || pMethodTable->Collectible());
4099 #endif //COLLECTIBLE_CLASS
4101 Object* GetObjectBase() const
4103 return (Object*) this;
4107 #define header(i) ((CObjectHeader*)(i))
4109 #define free_list_slot(x) ((uint8_t**)(x))[2]
4110 #define free_list_undo(x) ((uint8_t**)(x))[-1]
4111 #define UNDO_EMPTY ((uint8_t*)1)
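// Editorial sketch (not part of the original source): CObjectHeader above
// steals bit 0 of the MethodTable pointer as the mark bit, which is legal
// because real MethodTable addresses are at least pointer-aligned, so their
// low bit is always zero. The tagging scheme in isolation (hypothetical
// names, size_t standing in for the raw pointer):
namespace mark_bit_sketch
{
    const size_t kMarked = 0x1;  // plays the role of GC_MARKED

    inline size_t set_marked (size_t raw)    { return raw | kMarked; }
    inline size_t clear_marked (size_t raw)  { return raw & ~kMarked; }
    inline bool   is_marked (size_t raw)     { return (raw & kMarked) != 0; }
    inline void*  method_table (size_t raw)  { return (void*)(raw & ~kMarked); }
}
// after set_marked, method_table() still recovers the original aligned
// address, which is why GetMethodTable above masks GC_MARKED off.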
4115 void set_plug_padded (uint8_t* node)
4117 header(node)->SetMarked();
4120 void clear_plug_padded (uint8_t* node)
4122 header(node)->ClearMarked();
4125 BOOL is_plug_padded (uint8_t* node)
4127 return header(node)->IsMarked();
4130 inline void set_plug_padded (uint8_t* node){}
4131 inline void clear_plug_padded (uint8_t* node){}
4133 BOOL is_plug_padded (uint8_t* node){return FALSE;}
4134 #endif //SHORT_PLUGS
4137 inline size_t unused_array_size(uint8_t * p)
4139 assert(((CObjectHeader*)p)->IsFree());
4141 size_t* numComponentsPtr = (size_t*)(p + ArrayBase::GetOffsetOfNumComponents());
4142 return free_object_base_size + *numComponentsPtr;
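// Editorial arithmetic check (not part of the original source): SetFree
// formats a dead region as a free array whose numComponents field stores the
// bytes beyond free_object_base_size, and unused_array_size adds the base
// back, so the two round-trip. With a hypothetical 24-byte base size:
inline size_t example_free_num_components (size_t free_size, size_t base_size)
{
    assert (free_size >= base_size);
    return free_size - base_size;      // what SetFree stores
}
inline size_t example_free_total_size (size_t num_components, size_t base_size)
{
    return base_size + num_components; // what unused_array_size recomputes
}
// example_free_total_size (example_free_num_components (160, 24), 24) == 160.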
4145 heap_segment* heap_segment_rw (heap_segment* ns)
4147 if ((ns == 0) || !heap_segment_read_only_p (ns))
4155 ns = heap_segment_next (ns);
4156 } while ((ns != 0) && heap_segment_read_only_p (ns));
4161 //returns the next non-ro segment.
4162 heap_segment* heap_segment_next_rw (heap_segment* seg)
4164 heap_segment* ns = heap_segment_next (seg);
4165 return heap_segment_rw (ns);
4168 // returns the non-ro segment before seg.
4169 heap_segment* heap_segment_prev_rw (heap_segment* begin, heap_segment* seg)
4171 assert (begin != 0);
4172 heap_segment* prev = begin;
4173 heap_segment* current = heap_segment_next_rw (begin);
4175 while (current && current != seg)
4178 current = heap_segment_next_rw (current);
4191 // returns the segment before seg.
4192 heap_segment* heap_segment_prev (heap_segment* begin, heap_segment* seg)
4194 assert (begin != 0);
4195 heap_segment* prev = begin;
4196 heap_segment* current = heap_segment_next (begin);
4198 while (current && current != seg)
4201 current = heap_segment_next (current);
4214 heap_segment* heap_segment_in_range (heap_segment* ns)
4216 if ((ns == 0) || heap_segment_in_range_p (ns))
4224 ns = heap_segment_next (ns);
4225 } while ((ns != 0) && !heap_segment_in_range_p (ns));
4230 heap_segment* heap_segment_next_in_range (heap_segment* seg)
4232 heap_segment* ns = heap_segment_next (seg);
4233 return heap_segment_in_range (ns);
4238 uint8_t* memory_base;
4243 imemory_data *initial_memory;
4244 imemory_data *initial_normal_heap; // points into initial_memory_array
4245 imemory_data *initial_large_heap; // points into initial_memory_array
4247 size_t block_size_normal;
4248 size_t block_size_large;
4250 size_t block_count; // # of blocks in each
4251 size_t current_block_normal;
4252 size_t current_block_large;
4261 size_t allocation_pattern;
4262 } initial_memory_details;
4264 initial_memory_details memory_details;
4266 BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps)
4268 BOOL reserve_success = FALSE;
4270 // should only be called once
4271 assert (memory_details.initial_memory == 0);
4273 memory_details.initial_memory = new (nothrow) imemory_data[num_heaps*2];
4274 if (memory_details.initial_memory == 0)
4276 dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps*2*sizeof(imemory_data)));
4280 memory_details.initial_normal_heap = memory_details.initial_memory;
4281 memory_details.initial_large_heap = memory_details.initial_memory + num_heaps;
4282 memory_details.block_size_normal = normal_size;
4283 memory_details.block_size_large = large_size;
4284 memory_details.block_count = num_heaps;
4286 memory_details.current_block_normal = 0;
4287 memory_details.current_block_large = 0;
4289 g_gc_lowest_address = MAX_PTR;
4290 g_gc_highest_address = 0;
4292 if (((size_t)MAX_PTR - large_size) < normal_size)
4294 // we are already overflowing with just one heap.
4295 dprintf (2, ("0x%Ix + 0x%Ix already overflow", normal_size, large_size));
4299 if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size))
4301 dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count));
4305 size_t requestedMemory = memory_details.block_count * (normal_size + large_size);
4307 uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
4308 if (allatonce_block)
4310 g_gc_lowest_address = allatonce_block;
4311 g_gc_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
4312 memory_details.allocation_pattern = initial_memory_details::ALLATONCE;
4314 for(size_t i = 0; i < memory_details.block_count; i++)
4316 memory_details.initial_normal_heap[i].memory_base = allatonce_block + (i*normal_size);
4317 memory_details.initial_large_heap[i].memory_base = allatonce_block +
4318 (memory_details.block_count*normal_size) + (i*large_size);
4319 reserve_success = TRUE;
4324 // try to allocate 2 blocks
4327 b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size);
4330 b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size);
4333 memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
4334 g_gc_lowest_address = min(b1,b2);
4335 g_gc_highest_address = max(b1 + memory_details.block_count*normal_size,
4336 b2 + memory_details.block_count*large_size);
4337 for(size_t i = 0; i < memory_details.block_count; i++)
4339 memory_details.initial_normal_heap[i].memory_base = b1 + (i*normal_size);
4340 memory_details.initial_large_heap[i].memory_base = b2 + (i*large_size);
4341 reserve_success = TRUE;
4346 // b2 allocation failed, we'll go on to try allocating each block.
4347 // We could preserve the b1 alloc, but code complexity increases
4348 virtual_free (b1, memory_details.block_count * normal_size);
4352 if ((b2==NULL) && ( memory_details.block_count > 1))
4354 memory_details.allocation_pattern = initial_memory_details::EACH_BLOCK;
4356 imemory_data *current_block = memory_details.initial_memory;
4357 for(size_t i = 0; i < (memory_details.block_count*2); i++, current_block++)
4359 size_t block_size = ((i < memory_details.block_count) ?
4360 memory_details.block_size_normal :
4361 memory_details.block_size_large);
4362 current_block->memory_base =
4363 (uint8_t*)virtual_alloc (block_size);
4364 if (current_block->memory_base == 0)
4366 // Free the blocks that we've allocated so far
4367 current_block = memory_details.initial_memory;
4368 for(size_t j = 0; j < i; j++, current_block++){
4369 if (current_block->memory_base != 0){
4370 block_size = ((j < memory_details.block_count) ?
4371 memory_details.block_size_normal :
4372 memory_details.block_size_large);
4373 virtual_free (current_block->memory_base , block_size);
4376 reserve_success = FALSE;
4381 if (current_block->memory_base < g_gc_lowest_address)
4382 g_gc_lowest_address = current_block->memory_base;
4383 if (((uint8_t *) current_block->memory_base + block_size) > g_gc_highest_address)
4384 g_gc_highest_address = (current_block->memory_base + block_size);
4386 reserve_success = TRUE;
4391 return reserve_success;
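// Editorial sketch (not part of the original source): the function above
// tries three layouts in order: one contiguous reservation for everything
// (ALLATONCE), one block per size class (TWO_STAGE), then one block per heap
// per size class (EACH_BLOCK). A condensed model of that cascade, where
// try_reserve/release are hypothetical stand-ins for virtual_alloc/virtual_free:
enum example_reserve_pattern { EX_ALLATONCE, EX_TWO_STAGE, EX_EACH_BLOCK };

inline example_reserve_pattern example_choose_pattern (size_t normal_size, size_t large_size,
                                                       size_t heaps,
                                                       void* (*try_reserve)(size_t),
                                                       void (*release)(void*, size_t))
{
    if (try_reserve (heaps * (normal_size + large_size)))
        return EX_ALLATONCE;                 // best case: one contiguous range
    void* b1 = try_reserve (heaps * normal_size);
    if (b1)
    {
        if (try_reserve (heaps * large_size))
            return EX_TWO_STAGE;             // one range per size class
        release (b1, heaps * normal_size);   // as above: give b1 back first
    }
    return EX_EACH_BLOCK;                    // last resort: heaps * 2 blocks
}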
4394 void destroy_initial_memory()
4396 if (memory_details.initial_memory != NULL)
4398 if (memory_details.allocation_pattern == initial_memory_details::ALLATONCE)
4400 virtual_free(memory_details.initial_memory[0].memory_base,
4401 memory_details.block_count*(memory_details.block_size_normal +
4402 memory_details.block_size_large));
4404 else if (memory_details.allocation_pattern == initial_memory_details::TWO_STAGE)
4406 virtual_free (memory_details.initial_normal_heap[0].memory_base,
4407 memory_details.block_count*memory_details.block_size_normal);
4409 virtual_free (memory_details.initial_large_heap[0].memory_base,
4410 memory_details.block_count*memory_details.block_size_large);
4414 assert (memory_details.allocation_pattern == initial_memory_details::EACH_BLOCK);
4415 imemory_data *current_block = memory_details.initial_memory;
4416 for(size_t i = 0; i < (memory_details.block_count*2); i++, current_block++)
4418 size_t block_size = (i < memory_details.block_count) ? memory_details.block_size_normal :
4419 memory_details.block_size_large;
4420 if (current_block->memory_base != NULL)
4422 virtual_free (current_block->memory_base, block_size);
4427 delete [] memory_details.initial_memory;
4428 memory_details.initial_memory = NULL;
4429 memory_details.initial_normal_heap = NULL;
4430 memory_details.initial_large_heap = NULL;
4434 void* next_initial_memory (size_t size)
4436 assert ((size == memory_details.block_size_normal) || (size == memory_details.block_size_large));
4439 if ((size != memory_details.block_size_normal) ||
4440 ((memory_details.current_block_normal == memory_details.block_count) &&
4441 (memory_details.block_size_normal == memory_details.block_size_large)))
4443 // If the block sizes are the same, flow block requests from normal to large
4444 assert (memory_details.current_block_large < memory_details.block_count);
4445 assert (memory_details.initial_large_heap != 0);
4447 res = memory_details.initial_large_heap[memory_details.current_block_large].memory_base;
4448 memory_details.current_block_large++;
4452 assert (memory_details.current_block_normal < memory_details.block_count);
4453 assert (memory_details.initial_normal_heap != NULL);
4455 res = memory_details.initial_normal_heap[memory_details.current_block_normal].memory_base;
4456 memory_details.current_block_normal++;
4462 heap_segment* get_initial_segment (size_t size, int h_number)
4464 void* mem = next_initial_memory (size);
4465 heap_segment* res = gc_heap::make_heap_segment ((uint8_t*)mem, size , h_number);
4470 void* virtual_alloc (size_t size)
4472 size_t requested_size = size;
4474 if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
4476 gc_heap::reserved_memory_limit =
4477 GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
4478 if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
4484 uint32_t flags = VirtualReserveFlags::None;
4485 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
4486 if (virtual_alloc_hardware_write_watch)
4488 flags = VirtualReserveFlags::WriteWatch;
4490 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
4491 void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
4492 void *aligned_mem = prgmem;
4494 // We don't want (prgmem + size) to be right at the end of the address space
4495 // because we'd have to worry about that every time we do (address + size).
4496 // We also want to make sure that we leave loh_size_threshold at the end
4497 // so that if we allocate a small object we don't need to worry about overflow
4498 // there when we do alloc_ptr + size.
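// For a concrete (hypothetical) case: if the reservation happened to end at
// MAX_PTR - 100 and END_SPACE_AFTER_GC were larger than 100, the check below
// would release the memory and fail the allocation rather than let a later
// (address + size) computation run off the end of the address space.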
4501 uint8_t* end_mem = (uint8_t*)prgmem + requested_size;
4503 if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC))
4505 GCToOSInterface::VirtualRelease (prgmem, requested_size);
4506 dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding",
4507 requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
4515 gc_heap::reserved_memory += requested_size;
4518 dprintf (2, ("Virtual Alloc size %Id: [%Ix, %Ix[",
4519 requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
4524 void virtual_free (void* add, size_t size)
4526 GCToOSInterface::VirtualRelease (add, size);
4527 gc_heap::reserved_memory -= size;
4528 dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[",
4529 size, (size_t)add, (size_t)((uint8_t*)add+size)));
4532 static size_t get_valid_segment_size (BOOL large_seg=FALSE)
4534 size_t seg_size, initial_seg_size;
4538 initial_seg_size = INITIAL_ALLOC;
4539 seg_size = static_cast<size_t>(GCConfig::GetSegmentSize());
4543 initial_seg_size = LHEAP_ALLOC;
4544 seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2;
4547 #ifdef MULTIPLE_HEAPS
4552 if (g_num_processors > 4)
4553 initial_seg_size /= 2;
4554 if (g_num_processors > 8)
4555 initial_seg_size /= 2;
4557 #endif //MULTIPLE_HEAPS
4559 // if seg_size is small but not 0 (0 is default if config not set)
4560 // then set the segment to the minimum size
4561 if (!g_theGCHeap->IsValidSegmentSize(seg_size))
4563 // if requested size is between 1 byte and 4MB, use min
4564 if ((seg_size >> 1) && !(seg_size >> 22))
4565 seg_size = 1024*1024*4;
4567 seg_size = initial_seg_size;
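// Worked example of the test above: a configured seg_size of 1MB (0x100000)
// has (seg_size >> 1) != 0 and (seg_size >> 22) == 0, so it gets raised to
// the 4MB minimum; a configured 0 fails the first test and falls back to
// initial_seg_size instead.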
4570 #ifdef SEG_MAPPING_TABLE
4572 seg_size = round_up_power2 (seg_size);
4574 seg_size = round_down_power2 (seg_size);
4576 #endif //SEG_MAPPING_TABLE
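// For example, with the mapping table a 100MB request becomes 128MB where we
// round up and 64MB where we round down; keeping segment sizes a power of 2
// presumably lets the mapping table locate a segment with shift/mask
// arithmetic rather than a division. (Illustrative numbers only.)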
4582 gc_heap::compute_new_ephemeral_size()
4584 int eph_gen_max = max_generation - 1 - (settings.promotion ? 1 : 0);
4585 size_t padding_size = 0;
4587 for (int i = 0; i <= eph_gen_max; i++)
4589 dynamic_data* dd = dynamic_data_of (i);
4590 total_ephemeral_size += (dd_survived_size (dd) - dd_pinned_survived_size (dd));
4591 #ifdef RESPECT_LARGE_ALIGNMENT
4592 total_ephemeral_size += dd_num_npinned_plugs (dd) * switch_alignment_size (FALSE);
4593 #endif //RESPECT_LARGE_ALIGNMENT
4594 #ifdef FEATURE_STRUCTALIGN
4595 total_ephemeral_size += dd_num_npinned_plugs (dd) * MAX_STRUCTALIGN;
4596 #endif //FEATURE_STRUCTALIGN
4599 padding_size += dd_padding_size (dd);
4600 #endif //SHORT_PLUGS
4603 total_ephemeral_size += eph_gen_starts_size;
4605 #ifdef RESPECT_LARGE_ALIGNMENT
4606 size_t planned_ephemeral_size = heap_segment_plan_allocated (ephemeral_heap_segment) -
4607 generation_plan_allocation_start (generation_of (max_generation-1));
4608 total_ephemeral_size = min (total_ephemeral_size, planned_ephemeral_size);
4609 #endif //RESPECT_LARGE_ALIGNMENT
4612 total_ephemeral_size = Align ((size_t)((double)total_ephemeral_size * short_plugs_pad_ratio) + 1);
4613 total_ephemeral_size += Align (DESIRED_PLUG_LENGTH);
4614 #endif //SHORT_PLUGS
4616 dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)",
4617 total_ephemeral_size,
4618 padding_size, (total_ephemeral_size - padding_size)));
4622 #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
4626 gc_heap::soh_get_segment_to_expand()
4628 size_t size = soh_segment_size;
4630 ordered_plug_indices_init = FALSE;
4631 use_bestfit = FALSE;
4633 //compute the size of the new ephemeral heap segment.
4634 compute_new_ephemeral_size();
4636 if ((settings.pause_mode != pause_low_latency) &&
4637 (settings.pause_mode != pause_no_gc)
4638 #ifdef BACKGROUND_GC
4639 && (!recursive_gc_sync::background_running_p())
4640 #endif //BACKGROUND_GC
4643 allocator* gen_alloc = ((settings.condemned_generation == max_generation) ? 0 :
4644 generation_allocator (generation_of (max_generation)));
4645 dprintf (2, ("(gen%d)soh_get_segment_to_expand", settings.condemned_generation));
4647 // try to find one in the gen 2 segment list, search backwards because the first segments
4648 // tend to be more compact than the later ones.
4649 heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
4651 PREFIX_ASSUME(fseg != NULL);
4653 #ifdef SEG_REUSE_STATS
4655 #endif //SEG_REUSE_STATS
4657 heap_segment* seg = ephemeral_heap_segment;
4658 while ((seg = heap_segment_prev_rw (fseg, seg)) && (seg != fseg))
4660 #ifdef SEG_REUSE_STATS
4662 #endif //SEG_REUSE_STATS
4664 if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc))
4666 get_gc_data_per_heap()->set_mechanism (gc_heap_expand,
4667 (use_bestfit ? expand_reuse_bestfit : expand_reuse_normal));
4668 if (settings.condemned_generation == max_generation)
4672 build_ordered_free_spaces (seg);
4673 dprintf (GTC_LOG, ("can use best fit"));
4676 #ifdef SEG_REUSE_STATS
4677 dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse",
4678 settings.condemned_generation, try_reuse));
4679 #endif //SEG_REUSE_STATS
4680 dprintf (GTC_LOG, ("max_gen: Found existing segment to expand into %Ix", (size_t)seg));
4685 #ifdef SEG_REUSE_STATS
4686 dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse - returning",
4687 settings.condemned_generation, try_reuse));
4688 #endif //SEG_REUSE_STATS
4689 dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg));
4691 // If we return 0 here, the allocator will think that since we are short on end
4692 // of seg we need to trigger a full compacting GC. So if sustained low latency
4693 // is set we should acquire a new seg instead; that way we wouldn't be short.
4694 // The real solution, of course, is to actually implement seg reuse in gen1.
4695 if (settings.pause_mode != pause_sustained_low_latency)
4697 dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is not set, signaling the next GC as a full compacting GC"));
4698 get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc);
4706 heap_segment* result = get_segment (size, FALSE);
4710 #ifdef BACKGROUND_GC
4711 if (current_c_gc_state == c_gc_state_planning)
4713 // When we expand heap during bgc sweep, we set the seg to be swept so
4714 // we'll always look at cards for objects on the new segment.
4715 result->flags |= heap_segment_flags_swept;
4717 #endif //BACKGROUND_GC
4719 FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(result),
4720 (size_t)(heap_segment_reserved (result) - heap_segment_mem(result)),
4721 gc_etw_segment_small_object_heap);
4724 get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? expand_new_seg : expand_no_memory));
4728 dprintf (2, ("h%d: failed to allocate a new segment!", heap_number));
4732 #ifdef MULTIPLE_HEAPS
4733 heap_segment_heap (result) = this;
4734 #endif //MULTIPLE_HEAPS
4737 dprintf (GTC_LOG, ("(gen%d)creating new segment %Ix", settings.condemned_generation, result));
4742 #pragma warning(default:4706)
4745 //returns 0 in case of allocation failure
4747 gc_heap::get_segment (size_t size, BOOL loh_p)
4749 if (heap_hard_limit)
4752 heap_segment* result = 0;
4754 if (segment_standby_list != 0)
4756 result = segment_standby_list;
4757 heap_segment* last = 0;
4760 size_t hs = (size_t)(heap_segment_reserved (result) - (uint8_t*)result);
4761 if ((hs >= size) && ((hs / 2) < size))
4763 dprintf (2, ("Hoarded segment %Ix found", (size_t) result));
4766 heap_segment_next (last) = heap_segment_next (result);
4770 segment_standby_list = heap_segment_next (result);
4777 result = heap_segment_next (result);
4784 init_heap_segment (result);
4785 #ifdef BACKGROUND_GC
4786 if (should_commit_mark_array())
4788 dprintf (GC_TABLE_LOG, ("hoarded seg %Ix, mark_array is %Ix", result, mark_array));
4789 if (!commit_mark_array_new_seg (__this, result))
4791 dprintf (GC_TABLE_LOG, ("failed to commit mark array for hoarded seg"));
4792 // If we can't use it we need to thread it back.
4793 if (segment_standby_list != 0)
4795 heap_segment_next (result) = segment_standby_list;
4796 segment_standby_list = result;
4800 segment_standby_list = result;
4806 #endif //BACKGROUND_GC
4808 #ifdef SEG_MAPPING_TABLE
4810 seg_mapping_table_add_segment (result, __this);
4811 #endif //SEG_MAPPING_TABLE
4816 #ifndef SEG_MAPPING_TABLE
4817 if (!seg_table->ensure_space_for_insert ())
4819 #endif //SEG_MAPPING_TABLE
4820 void* mem = virtual_alloc (size);
4823 fgm_result.set_fgm (fgm_reserve_segment, size, loh_p);
4827 result = gc_heap::make_heap_segment ((uint8_t*)mem, size, heap_number);
4833 if (mem < g_gc_lowest_address)
4835 start = (uint8_t*)mem;
4839 start = (uint8_t*)g_gc_lowest_address;
4842 if (((uint8_t*)mem + size) > g_gc_highest_address)
4844 end = (uint8_t*)mem + size;
4848 end = (uint8_t*)g_gc_highest_address;
4851 if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0)
4853 virtual_free (mem, size);
4859 fgm_result.set_fgm (fgm_commit_segment_beg, SEGMENT_INITIAL_COMMIT, loh_p);
4860 virtual_free (mem, size);
4865 #ifdef SEG_MAPPING_TABLE
4866 seg_mapping_table_add_segment (result, __this);
4867 #else //SEG_MAPPING_TABLE
4868 gc_heap::seg_table->insert ((uint8_t*)result, delta);
4869 #endif //SEG_MAPPING_TABLE
4873 #ifdef BACKGROUND_GC
4876 ::record_changed_seg ((uint8_t*)result, heap_segment_reserved (result),
4877 settings.gc_index, current_bgc_state,
4879 bgc_verify_mark_array_cleared (result);
4881 #endif //BACKGROUND_GC
4883 dprintf (GC_TABLE_LOG, ("h%d: new seg: %Ix-%Ix (%Id)", heap_number, result, ((uint8_t*)result + size), size));
4887 void release_segment (heap_segment* sg)
4889 ptrdiff_t delta = 0;
4890 FIRE_EVENT(GCFreeSegment_V1, heap_segment_mem(sg));
4891 virtual_free (sg, (uint8_t*)heap_segment_reserved (sg)-(uint8_t*)sg);
4894 heap_segment* gc_heap::get_segment_for_loh (size_t size
4895 #ifdef MULTIPLE_HEAPS
4897 #endif //MULTIPLE_HEAPS
4900 #ifndef MULTIPLE_HEAPS
4902 #endif //MULTIPLE_HEAPS
4903 heap_segment* res = hp->get_segment (size, TRUE);
4906 #ifdef MULTIPLE_HEAPS
4907 heap_segment_heap (res) = hp;
4908 #endif //MULTIPLE_HEAPS
4909 res->flags |= heap_segment_flags_loh;
4911 FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), gc_etw_segment_large_object_heap);
4913 GCToEEInterface::DiagUpdateGenerationBounds();
4915 #ifdef MULTIPLE_HEAPS
4916 hp->thread_loh_segment (res);
4918 thread_loh_segment (res);
4919 #endif //MULTIPLE_HEAPS
4925 void gc_heap::thread_loh_segment (heap_segment* new_seg)
4927 heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
4929 while (heap_segment_next_rw (seg))
4930 seg = heap_segment_next_rw (seg);
4931 heap_segment_next (seg) = new_seg;
4935 gc_heap::get_large_segment (size_t size, BOOL* did_full_compact_gc)
4937 *did_full_compact_gc = FALSE;
4938 size_t last_full_compact_gc_count = get_full_compact_gc_count();
4940 //access to get_segment needs to be serialized
4941 add_saved_spinlock_info (true, me_release, mt_get_large_seg);
4942 leave_spin_lock (&more_space_lock_loh);
4943 enter_spin_lock (&gc_heap::gc_lock);
4944 dprintf (SPINLOCK_LOG, ("[%d]Seg: Egc", heap_number));
4945 // if a GC happened while we were swapping the locks above, before we
4946 // actually ask for a segment below, we need to count that GC.
4947 size_t current_full_compact_gc_count = get_full_compact_gc_count();
4949 if (current_full_compact_gc_count > last_full_compact_gc_count)
4951 *did_full_compact_gc = TRUE;
4954 heap_segment* res = get_segment_for_loh (size
4955 #ifdef MULTIPLE_HEAPS
4957 #endif //MULTIPLE_HEAPS
4960 dprintf (SPINLOCK_LOG, ("[%d]Seg: A Lgc", heap_number));
4961 leave_spin_lock (&gc_heap::gc_lock);
4962 enter_spin_lock (&more_space_lock_loh);
4963 add_saved_spinlock_info (true, me_acquire, mt_get_large_seg);
4969 BOOL gc_heap::unprotect_segment (heap_segment* seg)
4971 uint8_t* start = align_lower_page (heap_segment_mem (seg));
4972 ptrdiff_t region_size = heap_segment_allocated (seg) - start;
4974 if (region_size != 0 )
4976 dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
4978 BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
4986 #ifdef MULTIPLE_HEAPS
4989 #pragma warning(disable:4035)
4990 static ptrdiff_t get_cycle_count()
4994 #pragma warning(default:4035)
4995 #elif defined(__GNUC__)
4996 static ptrdiff_t get_cycle_count()
5000 __asm__ __volatile__
5001 ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
5005 #error Unknown compiler
5007 #elif defined(_TARGET_AMD64_)
5009 extern "C" uint64_t __rdtsc();
5010 #pragma intrinsic(__rdtsc)
5011 static ptrdiff_t get_cycle_count()
5013 return (ptrdiff_t)__rdtsc();
5015 #elif defined(__GNUC__)
5016 static ptrdiff_t get_cycle_count()
5020 __asm__ __volatile__
5021 ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
5022 return (cyclesHi << 32) | cycles;
5025 extern "C" ptrdiff_t get_cycle_count(void);
5027 #elif defined(_TARGET_ARM_)
5028 static ptrdiff_t get_cycle_count()
5030 // @ARMTODO: cycle counter is not exposed to user mode by CoreARM. For now (until we can show this
5031 // makes a difference on the ARM configurations on which we'll run) just return 0. This will result in
5032 // all buffer access times being reported as equal in access_time().
5035 #elif defined(_TARGET_ARM64_)
5036 static ptrdiff_t get_cycle_count()
5038 // @ARM64TODO: cycle counter is not exposed to user mode by CoreARM. For now (until we can show this
5039 // makes a difference on the ARM configurations on which we'll run) just return 0. This will result in
5040 // all buffer access times being reported as equal in access_time().
5044 #error NYI platform: get_cycle_count
5045 #endif //_TARGET_X86_
5050 static uint8_t* sniff_buffer;
5051 static unsigned n_sniff_buffers;
5052 static unsigned cur_sniff_index;
5054 static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
5055 static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
5056 static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
5057 static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
5059 static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers)
5061 ptrdiff_t start_cycles = get_cycle_count();
5062 uint8_t sniff = sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE];
5063 assert (sniff == 0);
5064 ptrdiff_t elapsed_cycles = get_cycle_count() - start_cycles;
5065 // add sniff here just to defeat the optimizer
5066 elapsed_cycles += sniff;
5067 return (int) elapsed_cycles;
5071 static BOOL init(int n_heaps)
5073 assert (sniff_buffer == NULL && n_sniff_buffers == 0);
5074 if (!GCToOSInterface::CanGetCurrentProcessorNumber())
5076 n_sniff_buffers = n_heaps*2+1;
5077 size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1;
5078 size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
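// Worked sizing example (hypothetical n_heaps = 4): n_sniff_buffers = 4*2+1 = 9
// and n_cache_lines = 1 + 4*9 + 1 = 38, so the buffer is 38 cache lines; the
// (1 + heap_number*n_sniff_buffers + sniff_index) indexing in access_time then
// gives every heap its own cache line for each sniff index.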
5079 if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overflow
5084 sniff_buffer = new (nothrow) uint8_t[sniff_buf_size];
5085 if (sniff_buffer == 0)
5087 memset(sniff_buffer, 0, sniff_buf_size*sizeof(uint8_t));
5090 //cannot enable GC NUMA awareness; force all heaps to be in
5091 //one numa node by filling the array with all 0s
5092 if (!GCToOSInterface::CanEnableGCNumaAware())
5093 memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node));
5098 static void init_cpu_mapping(gc_heap * /*heap*/, int heap_number)
5100 if (GCToOSInterface::CanGetCurrentProcessorNumber())
5102 uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps;
5103 // We can safely cast heap_number to a uint16_t because GetCurrentProcessCpuCount
5104 // only returns up to MAX_SUPPORTED_CPUS procs right now. We only ever create at most
5105 // MAX_SUPPORTED_CPUS GC threads.
5106 proc_no_to_heap_no[proc_no] = (uint16_t)heap_number;
5110 static void mark_heap(int heap_number)
5112 if (GCToOSInterface::CanGetCurrentProcessorNumber())
5115 for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++)
5116 sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
5119 static int select_heap(alloc_context* acontext, int /*hint*/)
5121 UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf
5123 if (GCToOSInterface::CanGetCurrentProcessorNumber())
5124 return proc_no_to_heap_no[GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps];
5126 unsigned sniff_index = Interlocked::Increment(&cur_sniff_index);
5127 sniff_index %= n_sniff_buffers;
5130 int best_access_time = 1000*1000*1000;
5131 int second_best_access_time = best_access_time;
5133 uint8_t *l_sniff_buffer = sniff_buffer;
5134 unsigned l_n_sniff_buffers = n_sniff_buffers;
5135 for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++)
5137 int this_access_time = access_time(l_sniff_buffer, heap_number, sniff_index, l_n_sniff_buffers);
5138 if (this_access_time < best_access_time)
5140 second_best_access_time = best_access_time;
5141 best_access_time = this_access_time;
5142 best_heap = heap_number;
5144 else if (this_access_time < second_best_access_time)
5146 second_best_access_time = this_access_time;
5150 if (best_access_time*2 < second_best_access_time)
5152 sniff_buffer[(1 + best_heap*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
5154 dprintf (3, ("select_heap yields crisp %d for context %p\n", best_heap, (void *)acontext));
5158 dprintf (3, ("select_heap yields vague %d for context %p\n", best_heap, (void *)acontext ));
5164 static bool can_find_heap_fast()
5166 return GCToOSInterface::CanGetCurrentProcessorNumber();
5169 static uint16_t find_proc_no_from_heap_no(int heap_number)
5171 return heap_no_to_proc_no[heap_number];
5174 static void set_proc_no_for_heap(int heap_number, uint16_t proc_no)
5176 heap_no_to_proc_no[heap_number] = proc_no;
5179 static uint16_t find_numa_node_from_heap_no(int heap_number)
5181 return heap_no_to_numa_node[heap_number];
5184 static void set_numa_node_for_heap(int heap_number, uint16_t numa_node)
5186 heap_no_to_numa_node[heap_number] = numa_node;
5189 static void init_numa_node_to_heap_map(int nheaps)
5190 { // called right after GCHeap::Init() has finished for each heap
5191 // when numa is not enabled, heap_no_to_numa_node[] is filled
5192 // with 0s during initialization, and will be treated as one node
5193 numa_node_to_heap_map[0] = 0;
5196 for (int i=1; i < nheaps; i++)
5198 if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1])
5199 numa_node_to_heap_map[node_index++] = (uint16_t)i;
5201 numa_node_to_heap_map[node_index] = (uint16_t)nheaps; //mark the end with nheaps
5204 static void get_heap_range_for_heap(int hn, int* start, int* end)
5205 { // 1-tier/no-numa case: heap_no_to_numa_node[] is all zeros
5206 // and treated as one node. thus: start=0, end=n_heaps
5207 uint16_t numa_node = heap_no_to_numa_node[hn];
5208 *start = (int)numa_node_to_heap_map[numa_node];
5209 *end = (int)(numa_node_to_heap_map[numa_node+1]);
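// Small worked example (hypothetical topology): 6 heaps with heaps 0-2 on
// node 0 and heaps 3-5 on node 1 produce numa_node_to_heap_map = {0, 3, 6},
// so get_heap_range_for_heap(4) sees node 1 and returns [3, 6) -- the heaps
// on the caller's own NUMA node.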
5212 uint8_t* heap_select::sniff_buffer;
5213 unsigned heap_select::n_sniff_buffers;
5214 unsigned heap_select::cur_sniff_index;
5215 uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
5216 uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
5217 uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
5218 uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
5220 BOOL gc_heap::create_thread_support (unsigned number_of_heaps)
5223 if (!gc_start_event.CreateOSManualEventNoThrow (FALSE))
5227 if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE))
5231 if (!gc_t_join.init (number_of_heaps, join_flavor_server_gc))
5242 destroy_thread_support();
5248 void gc_heap::destroy_thread_support ()
5250 if (ee_suspend_event.IsValid())
5252 ee_suspend_event.CloseEvent();
5254 if (gc_start_event.IsValid())
5256 gc_start_event.CloseEvent();
5260 void set_thread_affinity_for_heap(int heap_number)
5265 if (GCToOSInterface::GetProcessorForHeap(heap_number, &proc_no, &node_no))
5267 heap_select::set_proc_no_for_heap(heap_number, proc_no);
5268 if (node_no != NUMA_NODE_UNDEFINED)
5270 heap_select::set_numa_node_for_heap(heap_number, node_no);
5272 if (!GCToOSInterface::SetThreadAffinity(proc_no))
5274 dprintf(1, ("Failed to set thread affinity for server GC thread"));
5279 bool gc_heap::create_gc_thread ()
5281 dprintf (3, ("Creating gc thread\n"));
5282 return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC");
5286 #pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
5288 void gc_heap::gc_thread_function ()
5290 assert (gc_done_event.IsValid());
5291 assert (gc_start_event.IsValid());
5292 dprintf (3, ("gc thread started"));
5294 heap_select::init_cpu_mapping(this, heap_number);
5298 assert (!gc_t_join.joined());
5300 if (heap_number == 0)
5302 gc_heap::ee_suspend_event.Wait(INFINITE, FALSE);
5304 BEGIN_TIMING(suspend_ee_during_log);
5305 GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
5306 END_TIMING(suspend_ee_during_log);
5308 proceed_with_gc_p = TRUE;
5310 if (!should_proceed_with_gc())
5312 update_collection_counts_for_no_gc();
5313 proceed_with_gc_p = FALSE;
5317 settings.init_mechanisms();
5318 gc_start_event.Set();
5320 dprintf (3, ("%d gc thread waiting...", heap_number));
5324 gc_start_event.Wait(INFINITE, FALSE);
5325 dprintf (3, ("%d gc thread waiting... Done", heap_number));
5328 assert ((heap_number == 0) || proceed_with_gc_p);
5330 if (proceed_with_gc_p)
5332 garbage_collect (GCHeap::GcCondemnedGeneration);
5334 if (pm_trigger_full_gc)
5336 garbage_collect_pm_full_gc();
5340 if (heap_number == 0)
5342 if (proceed_with_gc_p && (!settings.concurrent))
5347 #ifdef BACKGROUND_GC
5348 recover_bgc_settings();
5349 #endif //BACKGROUND_GC
5351 #ifdef MULTIPLE_HEAPS
5352 for (int i = 0; i < gc_heap::n_heaps; i++)
5354 gc_heap* hp = gc_heap::g_heaps[i];
5355 hp->add_saved_spinlock_info (false, me_release, mt_block_gc);
5356 leave_spin_lock(&hp->more_space_lock_soh);
5358 #endif //MULTIPLE_HEAPS
5360 gc_heap::gc_started = FALSE;
5362 BEGIN_TIMING(restart_ee_during_log);
5363 GCToEEInterface::RestartEE(TRUE);
5364 END_TIMING(restart_ee_during_log);
5365 process_sync_log_stats();
5367 dprintf (SPINLOCK_LOG, ("GC Lgc"));
5368 leave_spin_lock (&gc_heap::gc_lock);
5370 gc_heap::internal_gc_done = true;
5372 if (proceed_with_gc_p)
5376 // If we didn't actually do a GC, it means we didn't wake up the other threads;
5377 // we still need to set the gc_done_event for those threads.
5378 for (int i = 0; i < gc_heap::n_heaps; i++)
5380 gc_heap* hp = gc_heap::g_heaps[i];
5387 int spin_count = 32 * (gc_heap::n_heaps - 1);
5389 // wait until RestartEE has progressed to a stage where we can restart user threads
5390 while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads())
5392 spin_and_switch (spin_count, (gc_heap::internal_gc_done || GCHeap::SafeToRestartManagedThreads()));
5399 #pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
5402 #endif //MULTIPLE_HEAPS
5404 bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number)
5406 #if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
5407 // Currently there is no way for us to specify the numa node to allocate on via hosting interfaces to
5408 // a host. This will need to be added later.
5409 #if !defined(FEATURE_CORECLR) && !defined(BUILD_AS_STANDALONE)
5410 if (!CLRMemoryHosted())
5413 if (GCToOSInterface::CanEnableGCNumaAware())
5415 uint16_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
5416 if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
5421 UNREFERENCED_PARAMETER(h_number);
5424 //numa awareness not enabled, or the call failed --> fall back to VirtualCommit()
5425 return GCToOSInterface::VirtualCommit(addr, size);
5428 bool gc_heap::virtual_commit (void* address, size_t size, int h_number, bool* hard_limit_exceeded_p)
5431 assert (heap_hard_limit == 0);
5434 if (heap_hard_limit)
5436 bool exceeded_p = false;
5438 check_commit_cs.Enter();
5440 if ((current_total_committed + size) > heap_hard_limit)
5442 dprintf (1, ("%Id + %Id = %Id > limit",
5443 current_total_committed, size,
5444 (current_total_committed + size),
5451 current_total_committed += size;
5453 current_total_committed_bookkeeping += size;
5456 check_commit_cs.Leave();
5458 if (hard_limit_exceeded_p)
5459 *hard_limit_exceeded_p = exceeded_p;
5463 dprintf (1, ("can't commit %Ix for %Id bytes > HARD LIMIT %Id", (size_t)address, size, heap_hard_limit));
5468 // If it's a valid heap number, it means we're committing memory for the GC heap.
5469 bool commit_succeeded_p = ((h_number >= 0) ?
5470 virtual_alloc_commit_for_heap (address, size, h_number) :
5471 GCToOSInterface::VirtualCommit(address, size));
5473 if (!commit_succeeded_p && heap_hard_limit)
5475 check_commit_cs.Enter();
5476 dprintf (1, ("commit failed, updating %Id to %Id",
5477 current_total_committed, (current_total_committed - size)));
5478 current_total_committed -= size;
5480 current_total_committed_bookkeeping -= size;
5482 check_commit_cs.Leave();
5485 return commit_succeeded_p;
5488 bool gc_heap::virtual_decommit (void* address, size_t size, int h_number)
5491 assert (heap_hard_limit == 0);
5494 bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size);
5496 if (decommit_succeeded_p && heap_hard_limit)
5498 check_commit_cs.Enter();
5499 current_total_committed -= size;
5501 current_total_committed_bookkeeping -= size;
5502 check_commit_cs.Leave();
5505 return decommit_succeeded_p;
5508 #ifndef SEG_MAPPING_TABLE
5510 heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p)
5512 uint8_t* sadd = add;
5513 heap_segment* hs = 0;
5514 heap_segment* hs1 = 0;
5515 if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
5520 //repeat in case there is a concurrent insertion in the table.
5525 seg_table->lookup (sadd);
5526 hs1 = (heap_segment*)sadd;
5527 } while (hs1 && !in_range_for_segment (add, hs1) && (hs != hs1));
5532 (verify_p && (add > heap_segment_reserved ((heap_segment*)(sadd + delta)))))
5536 #endif //SEG_MAPPING_TABLE
5544 // If we want to save space we can have a pool of plug_and_gap structs instead of
5545 // always having 2 allocated for each pinned plug.
5546 gap_reloc_pair saved_pre_plug;
5547 // If we decide to not compact, we need to restore the original values.
5548 gap_reloc_pair saved_pre_plug_reloc;
5550 gap_reloc_pair saved_post_plug;
5552 // Supposedly Pinned objects cannot have references but we are seeing some from pinvoke
5553 // frames. Also if it's an artificially pinned plug created by us, it can certainly
5554 // have references.
5555 // We know these cases will be rare so we can optimize this to be only allocated on demand.
5556 gap_reloc_pair saved_post_plug_reloc;
5558 // We need to calculate this after we are done with the plan phase and before the compact
5559 // phase because the compact phase will change the bricks, so relocate_address will no
5560 // longer work.
5561 uint8_t* saved_pre_plug_info_reloc_start;
5563 // We need to save this because we will have no way to calculate it, unlike the
5564 // pre plug info start which is right before this plug.
5565 uint8_t* saved_post_plug_info_start;
5568 uint8_t* allocation_context_start_region;
5569 #endif //SHORT_PLUGS
5571 // How the bits in these bytes are organized:
5573 // bit to indicate whether it's a short obj | 3 bits for refs in this short obj | 2 unused bits | bit to indicate if it's collectible | last bit
5574 // last bit indicates if there's pre or post info associated with this plug. If it's not set all other bits will be 0.
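// A hedged illustration of the layout, assuming 64-bit pointers and a 32-bit
// saved_pre_p: gap_reloc_pair spans 3 pointer-sized slots, so
// get_max_short_bits() == 3; bit 31 is the "short obj" bit, bits 28-30 say
// which of those 3 slots hold references, bit 1 is the collectible bit and
// bit 0 records whether there is any pre/post plug info at all.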
5579 // We are seeing this get corrupted for a PP with an NP after.
5580 // Save it when we first set it and make sure it doesn't change.
5581 gap_reloc_pair saved_post_plug_debug;
5584 size_t get_max_short_bits()
5586 return (sizeof (gap_reloc_pair) / sizeof (uint8_t*));
5590 size_t get_pre_short_start_bit ()
5592 return (sizeof (saved_pre_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
5597 return (saved_pre_p & (1 << (sizeof (saved_pre_p) * 8 - 1)));
5600 void set_pre_short()
5602 saved_pre_p |= (1 << (sizeof (saved_pre_p) * 8 - 1));
5605 void set_pre_short_bit (size_t bit)
5607 saved_pre_p |= 1 << (get_pre_short_start_bit() + bit);
5610 BOOL pre_short_bit_p (size_t bit)
5612 return (saved_pre_p & (1 << (get_pre_short_start_bit() + bit)));
5615 #ifdef COLLECTIBLE_CLASS
5616 void set_pre_short_collectible()
5621 BOOL pre_short_collectible_p()
5623 return (saved_pre_p & 2);
5625 #endif //COLLECTIBLE_CLASS
5628 size_t get_post_short_start_bit ()
5630 return (sizeof (saved_post_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
5635 return (saved_post_p & (1 << (sizeof (saved_post_p) * 8 - 1)));
5638 void set_post_short()
5640 saved_post_p |= (1 << (sizeof (saved_post_p) * 8 - 1));
5643 void set_post_short_bit (size_t bit)
5645 saved_post_p |= 1 << (get_post_short_start_bit() + bit);
5648 BOOL post_short_bit_p (size_t bit)
5650 return (saved_post_p & (1 << (get_post_short_start_bit() + bit)));
5653 #ifdef COLLECTIBLE_CLASS
5654 void set_post_short_collectible()
5659 BOOL post_short_collectible_p()
5661 return (saved_post_p & 2);
5663 #endif //COLLECTIBLE_CLASS
5665 uint8_t* get_plug_address() { return first; }
5667 BOOL has_pre_plug_info() { return saved_pre_p; }
5668 BOOL has_post_plug_info() { return saved_post_p; }
5670 gap_reloc_pair* get_pre_plug_reloc_info() { return &saved_pre_plug_reloc; }
5671 gap_reloc_pair* get_post_plug_reloc_info() { return &saved_post_plug_reloc; }
5672 void set_pre_plug_info_reloc_start (uint8_t* reloc) { saved_pre_plug_info_reloc_start = reloc; }
5673 uint8_t* get_post_plug_info_start() { return saved_post_plug_info_start; }
5675 // We need to temporarily recover the shortened plugs for compact phase so we can
5676 // copy over the whole plug and their related info (mark bits/cards). But we will
5677 // need to set the artificial gap back so compact phase can keep reading the plug info.
5678 // We also need to put the saved copy back afterwards, because we will need it again later.
5680 // So we would call swap_p*_plug_and_saved once to recover the object info; then call
5681 // it again to recover the artificial gap.
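// A hypothetical call sequence for one pinned plug entry during compact
// (entry is assumed to be a mark* for the plug being copied):
//   entry->swap_pre_plug_and_saved();   // plug memory now holds the real object data
//   ... copy the whole plug plus its mark bits/cards ...
//   entry->swap_pre_plug_and_saved();   // artificial gap and saved copy restored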
5682 void swap_pre_plug_and_saved()
5684 gap_reloc_pair temp;
5685 memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
5686 memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
5687 saved_pre_plug_reloc = temp;
5690 void swap_post_plug_and_saved()
5692 gap_reloc_pair temp;
5693 memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
5694 memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
5695 saved_post_plug_reloc = temp;
5698 void swap_pre_plug_and_saved_for_profiler()
5700 gap_reloc_pair temp;
5701 memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
5702 memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
5703 saved_pre_plug = temp;
5706 void swap_post_plug_and_saved_for_profiler()
5708 gap_reloc_pair temp;
5709 memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
5710 memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
5711 saved_post_plug = temp;
5714 // We should think about whether it's really necessary to copy back the pre plug
5715 // info since it was already copied during compacting plugs. But if a plug doesn't move
5716 // by >= 3 pointer sizes (the size of gap_reloc_pair), we'd have to recover the pre plug info anyway.
5717 void recover_plug_info()
5721 if (gc_heap::settings.compaction)
5723 dprintf (3, ("%Ix: REC Pre: %Ix-%Ix",
5725 &saved_pre_plug_reloc,
5726 saved_pre_plug_info_reloc_start));
5727 memcpy (saved_pre_plug_info_reloc_start, &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
5731 dprintf (3, ("%Ix: REC Pre: %Ix-%Ix",
5734 (first - sizeof (plug_and_gap))));
5735 memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
5741 if (gc_heap::settings.compaction)
5743 dprintf (3, ("%Ix: REC Post: %Ix-%Ix",
5745 &saved_post_plug_reloc,
5746 saved_post_plug_info_start));
5747 memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
5751 dprintf (3, ("%Ix: REC Post: %Ix-%Ix",
5754 saved_post_plug_info_start));
5755 memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
5762 void gc_mechanisms::init_mechanisms()
5764 condemned_generation = 0;
5765 promotion = FALSE;//TRUE;
5767 #ifdef FEATURE_LOH_COMPACTION
5768 loh_compaction = gc_heap::loh_compaction_requested();
5770 loh_compaction = FALSE;
5771 #endif //FEATURE_LOH_COMPACTION
5772 heap_expansion = FALSE;
5775 elevation_reduced = FALSE;
5776 found_finalizers = FALSE;
5777 #ifdef BACKGROUND_GC
5778 background_p = recursive_gc_sync::background_running_p() != FALSE;
5779 allocations_allowed = TRUE;
5780 #endif //BACKGROUND_GC
5782 entry_memory_load = 0;
5783 exit_memory_load = 0;
5786 stress_induced = FALSE;
5787 #endif // STRESS_HEAP
5790 void gc_mechanisms::first_init()
5793 gen0_reduction_count = 0;
5794 should_lock_elevation = FALSE;
5795 elevation_locked_count = 0;
5796 reason = reason_empty;
5797 #ifdef BACKGROUND_GC
5798 pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch;
5800 int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode());
5801 if (debug_pause_mode >= 0)
5803 assert (debug_pause_mode <= pause_sustained_low_latency);
5804 pause_mode = (gc_pause_mode)debug_pause_mode;
5807 #else //BACKGROUND_GC
5808 pause_mode = pause_batch;
5809 #endif //BACKGROUND_GC
5814 void gc_mechanisms::record (gc_history_global* history)
5816 #ifdef MULTIPLE_HEAPS
5817 history->num_heaps = gc_heap::n_heaps;
5819 history->num_heaps = 1;
5820 #endif //MULTIPLE_HEAPS
5822 history->condemned_generation = condemned_generation;
5823 history->gen0_reduction_count = gen0_reduction_count;
5824 history->reason = reason;
5825 history->pause_mode = (int)pause_mode;
5826 history->mem_pressure = entry_memory_load;
5827 history->global_mechanims_p = 0;
5829 // start setting the boolean values.
5831 history->set_mechanism_p (global_concurrent);
5834 history->set_mechanism_p (global_compaction);
5837 history->set_mechanism_p (global_promotion);
5840 history->set_mechanism_p (global_demotion);
5843 history->set_mechanism_p (global_card_bundles);
5845 if (elevation_reduced)
5846 history->set_mechanism_p (global_elevation);
5849 /**********************************
5850 called at the beginning of GC to fix the allocated size to
5851 what is really allocated, or to turn the free area into an unused object
5852 It needs to be called after all of the other allocation contexts have been
5853 fixed since it relies on alloc_allocated.
5854 ********************************/
5856 //for_gc_p indicates that the work is being done for GC,
5857 //as opposed to concurrent heap verification
5858 void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
5860 UNREFERENCED_PARAMETER(for_gc_p);
5862 // The gen 0 alloc context is never used for allocation in the allocator path. It's
5863 // still used in the allocation path during GCs.
5864 assert (generation_allocation_pointer (youngest_generation) == nullptr);
5865 assert (generation_allocation_limit (youngest_generation) == nullptr);
5866 heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
5869 void gc_heap::fix_large_allocation_area (BOOL for_gc_p)
5871 UNREFERENCED_PARAMETER(for_gc_p);
5874 alloc_context* acontext =
5876 generation_alloc_context (large_object_generation);
5877 assert (acontext->alloc_ptr == 0);
5878 assert (acontext->alloc_limit == 0);
5880 dprintf (3, ("Large object alloc context: ptr: %Ix, limit %Ix",
5881 (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
5882 fix_allocation_context (acontext, FALSE, get_alignment_constant (FALSE));
5885 acontext->alloc_ptr = 0;
5886 acontext->alloc_limit = acontext->alloc_ptr;
5891 //for_gc_p indicates that the work is being done for GC,
5892 //as opposed to concurrent heap verification
5893 void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
5896 dprintf (3, ("Fixing allocation context %Ix: ptr: %Ix, limit: %Ix",
5898 (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
5900 if (((size_t)(alloc_allocated - acontext->alloc_limit) > Align (min_obj_size, align_const)) ||
5903 uint8_t* point = acontext->alloc_ptr;
5906 size_t size = (acontext->alloc_limit - acontext->alloc_ptr);
5907 // the allocation area was from the free list
5908 // it was shortened by Align (min_obj_size) to make room for
5909 // at least the shortest unused object
5910 size += Align (min_obj_size, align_const);
5911 assert ((size >= Align (min_obj_size)));
5913 dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point,
5914 (size_t)point + size ));
5915 make_unused_array (point, size);
5919 generation_free_obj_space (generation_of (0)) += size;
5920 alloc_contexts_used ++;
5926 alloc_allocated = acontext->alloc_ptr;
5927 assert (heap_segment_allocated (ephemeral_heap_segment) <=
5928 heap_segment_committed (ephemeral_heap_segment));
5929 alloc_contexts_used ++;
5934 // We need to update the alloc_bytes to reflect the portion that we have not used
5935 acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr);
5936 acontext->alloc_ptr = 0;
5937 acontext->alloc_limit = acontext->alloc_ptr;
5941 //used by the heap verification for concurrent gc.
5942 //it nulls out the words set by fix_allocation_context for heap_verification
5943 void repair_allocation (gc_alloc_context* acontext, void*)
5945 uint8_t* point = acontext->alloc_ptr;
5949 dprintf (3, ("Clearing [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
5950 (size_t)acontext->alloc_limit+Align(min_obj_size)));
5951 memclr (acontext->alloc_ptr - plug_skew,
5952 (acontext->alloc_limit - acontext->alloc_ptr)+Align (min_obj_size));
5956 void void_allocation (gc_alloc_context* acontext, void*)
5958 uint8_t* point = acontext->alloc_ptr;
5962 dprintf (3, ("Void [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
5963 (size_t)acontext->alloc_limit+Align(min_obj_size)));
5964 acontext->alloc_ptr = 0;
5965 acontext->alloc_limit = acontext->alloc_ptr;
5969 void gc_heap::repair_allocation_contexts (BOOL repair_p)
5971 GCToEEInterface::GcEnumAllocContexts (repair_p ? repair_allocation : void_allocation, NULL);
5974 struct fix_alloc_context_args
5980 void fix_alloc_context (gc_alloc_context* acontext, void* param)
5982 fix_alloc_context_args* args = (fix_alloc_context_args*)param;
5983 g_theGCHeap->FixAllocContext(acontext, (void*)(size_t)(args->for_gc_p), args->heap);
5986 void gc_heap::fix_allocation_contexts (BOOL for_gc_p)
5988 fix_alloc_context_args args;
5989 args.for_gc_p = for_gc_p;
5992 GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
5993 fix_youngest_allocation_area(for_gc_p);
5994 fix_large_allocation_area(for_gc_p);
5997 void gc_heap::fix_older_allocation_area (generation* older_gen)
5999 heap_segment* older_gen_seg = generation_allocation_segment (older_gen);
6000 if (generation_allocation_limit (older_gen) !=
6001 heap_segment_plan_allocated (older_gen_seg))
6003 uint8_t* point = generation_allocation_pointer (older_gen);
6005 size_t size = (generation_allocation_limit (older_gen) -
6006 generation_allocation_pointer (older_gen));
6009 assert ((size >= Align (min_obj_size)));
6010 dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point+size));
6011 make_unused_array (point, size);
6012 if (size >= min_free_list)
6014 generation_allocator (older_gen)->thread_item_front (point, size);
6015 add_gen_free (older_gen->gen_num, size);
6016 generation_free_list_space (older_gen) += size;
6020 generation_free_obj_space (older_gen) += size;
6026 assert (older_gen_seg != ephemeral_heap_segment);
6027 heap_segment_plan_allocated (older_gen_seg) =
6028 generation_allocation_pointer (older_gen);
6029 generation_allocation_limit (older_gen) =
6030 generation_allocation_pointer (older_gen);
6033 generation_allocation_pointer (older_gen) = 0;
6034 generation_allocation_limit (older_gen) = 0;
6037 void gc_heap::set_allocation_heap_segment (generation* gen)
6039 uint8_t* p = generation_allocation_start (gen);
6041 heap_segment* seg = generation_allocation_segment (gen);
6042 if (in_range_for_segment (p, seg))
6045 // try ephemeral heap segment in case of heap expansion
6046 seg = ephemeral_heap_segment;
6047 if (!in_range_for_segment (p, seg))
6049 seg = heap_segment_rw (generation_start_segment (gen));
6051 PREFIX_ASSUME(seg != NULL);
6053 while (!in_range_for_segment (p, seg))
6055 seg = heap_segment_next_rw (seg);
6056 PREFIX_ASSUME(seg != NULL);
6060 generation_allocation_segment (gen) = seg;
6063 void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start)
6066 assert (Align ((size_t)start) == (size_t)start);
6067 generation_allocation_start (gen) = start;
6068 generation_allocation_pointer (gen) = 0;//start + Align (min_obj_size);
6069 generation_allocation_limit (gen) = 0;//generation_allocation_pointer (gen);
6070 set_allocation_heap_segment (gen);
6073 #ifdef BACKGROUND_GC
6074 //TODO BACKGROUND_GC this is for test only
6076 gc_heap::disallow_new_allocation (int gen_number)
6078 UNREFERENCED_PARAMETER(gen_number);
6079 settings.allocations_allowed = FALSE;
6082 gc_heap::allow_new_allocation (int gen_number)
6084 UNREFERENCED_PARAMETER(gen_number);
6085 settings.allocations_allowed = TRUE;
6088 #endif //BACKGROUND_GC
6090 bool gc_heap::new_allocation_allowed (int gen_number)
6092 #ifdef BACKGROUND_GC
6093 //TODO BACKGROUND_GC this is for test only
6094 if (!settings.allocations_allowed)
6096 dprintf (2, ("new allocation not allowed"));
6099 #endif //BACKGROUND_GC
6101 if (dd_new_allocation (dynamic_data_of (gen_number)) < 0)
6103 if (gen_number != 0)
6105 // For LOH we will give it more budget before we try a GC.
6106 if (settings.concurrent)
6108 dynamic_data* dd2 = dynamic_data_of (max_generation + 1 );
6110 if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2)))
6118 #ifndef MULTIPLE_HEAPS
6119 else if ((settings.pause_mode != pause_no_gc) && (gen_number == 0))
6121 dprintf (3, ("evaluating allocation rate"));
6122 dynamic_data* dd0 = dynamic_data_of (0);
6123 if ((allocation_running_amount - dd_new_allocation (dd0)) >
6126 uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp();
6127 if ((ctime - allocation_running_time) > 1000)
6129 dprintf (2, (">1s since last gen0 gc"));
6134 allocation_running_amount = dd_new_allocation (dd0);
6138 #endif //MULTIPLE_HEAPS
6143 ptrdiff_t gc_heap::get_desired_allocation (int gen_number)
6145 return dd_desired_allocation (dynamic_data_of (gen_number));
6149 ptrdiff_t gc_heap::get_new_allocation (int gen_number)
6151 return dd_new_allocation (dynamic_data_of (gen_number));
6154 //return the amount allocated so far in gen_number
6156 ptrdiff_t gc_heap::get_allocation (int gen_number)
6158 dynamic_data* dd = dynamic_data_of (gen_number);
6160 return dd_desired_allocation (dd) - dd_new_allocation (dd);
6164 BOOL grow_mark_stack (mark*& m, size_t& len, size_t init_len)
6166 size_t new_size = max (init_len, 2*len);
6167 mark* tmp = new (nothrow) mark [new_size];
6170 memcpy (tmp, m, len * sizeof (mark));
6178 dprintf (1, ("Failed to allocate %Id bytes for mark stack", (len * sizeof (mark))));
6184 uint8_t* pinned_plug (mark* m)
6190 size_t& pinned_len (mark* m)
6196 void set_new_pin_info (mark* m, uint8_t* pin_free_space_start)
6198 m->len = pinned_plug (m) - pin_free_space_start;
6200 m->allocation_context_start_region = pin_free_space_start;
6201 #endif //SHORT_PLUGS
6206 uint8_t*& pin_allocation_context_start_region (mark* m)
6208 return m->allocation_context_start_region;
6211 uint8_t* get_plug_start_in_saved (uint8_t* old_loc, mark* pinned_plug_entry)
6213 uint8_t* saved_pre_plug_info = (uint8_t*)(pinned_plug_entry->get_pre_plug_reloc_info());
6214 uint8_t* plug_start_in_saved = saved_pre_plug_info + (old_loc - (pinned_plug (pinned_plug_entry) - sizeof (plug_and_gap)));
6215 //dprintf (1, ("detected a very short plug: %Ix before PP %Ix, pad %Ix",
6216 // old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
6217 dprintf (1, ("EP: %Ix(%Ix), %Ix", old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
6218 return plug_start_in_saved;
6222 void set_padding_in_expand (uint8_t* old_loc,
6223 BOOL set_padding_on_saved_p,
6224 mark* pinned_plug_entry)
6226 if (set_padding_on_saved_p)
6228 set_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
6232 set_plug_padded (old_loc);
6237 void clear_padding_in_expand (uint8_t* old_loc,
6238 BOOL set_padding_on_saved_p,
6239 mark* pinned_plug_entry)
6241 if (set_padding_on_saved_p)
6243 clear_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
6247 clear_plug_padded (old_loc);
6250 #endif //SHORT_PLUGS
6252 void gc_heap::reset_pinned_queue()
6258 void gc_heap::reset_pinned_queue_bos()
6263 // last_pinned_plug is only for assertion purposes.
6264 void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size)
6266 if (last_pinned_plug)
6268 mark& last_m = mark_stack_array[mark_stack_tos - 1];
6269 assert (last_pinned_plug == last_m.first);
6270 if (last_m.saved_post_p)
6272 last_m.saved_post_p = FALSE;
6273 dprintf (3, ("setting last plug %Ix post to false", last_m.first));
6274 // We need to recover what the gap has overwritten.
6275 memcpy ((last_m.first + last_m.len - sizeof (plug_and_gap)), &(last_m.saved_post_plug), sizeof (gap_reloc_pair));
6277 last_m.len += plug_size;
6278 dprintf (3, ("recovered the last part of plug %Ix, setting its plug size to %Ix", last_m.first, last_m.len));
6282 void gc_heap::set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit)
6284 dprintf (3, ("sanp: ptr: %Ix, limit: %Ix", alloc_pointer, alloc_limit));
6285 dprintf (3, ("oldest %Id: %Ix", mark_stack_bos, pinned_plug (oldest_pin())));
6286 if (!(pinned_plug_que_empty_p()))
6288 mark* oldest_entry = oldest_pin();
6289 uint8_t* plug = pinned_plug (oldest_entry);
6290 if ((plug >= alloc_pointer) && (plug < alloc_limit))
6292 alloc_limit = pinned_plug (oldest_entry);
6293 dprintf (3, ("now setting alloc context: %Ix->%Ix(%Id)",
6294 alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer)));
6299 void gc_heap::set_allocator_next_pin (generation* gen)
6301 dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen)));
6302 if (!(pinned_plug_que_empty_p()))
6304 mark* oldest_entry = oldest_pin();
6305 uint8_t* plug = pinned_plug (oldest_entry);
6306 if ((plug >= generation_allocation_pointer (gen)) &&
6307 (plug < generation_allocation_limit (gen)))
6309 generation_allocation_limit (gen) = pinned_plug (oldest_entry);
6310 dprintf (3, ("SANP: get next pin free space in gen%d for alloc: %Ix->%Ix(%Id)",
6312 generation_allocation_pointer (gen), generation_allocation_limit (gen),
6313 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
6316 assert (!((plug < generation_allocation_pointer (gen)) &&
6317 (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
6321 // After we set the info, we increase tos.
6322 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
6324 UNREFERENCED_PARAMETER(last_pinned_plug);
6326 mark& m = mark_stack_array[mark_stack_tos];
6327 assert (m.first == last_pinned_plug);
6331 set_allocator_next_pin (alloc_pointer, alloc_limit);
6334 // After we set the info, we increase tos.
6335 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
6337 UNREFERENCED_PARAMETER(last_pinned_plug);
6339 mark& m = mark_stack_array[mark_stack_tos];
6340 assert (m.first == last_pinned_plug);
6345 // Why are we checking here? gen is never 0.
6348 set_allocator_next_pin (gen);
6352 size_t gc_heap::deque_pinned_plug ()
6354 dprintf (3, ("dequed: %Id", mark_stack_bos));
6355 size_t m = mark_stack_bos;
6361 mark* gc_heap::pinned_plug_of (size_t bos)
6363 return &mark_stack_array [ bos ];
6367 mark* gc_heap::oldest_pin ()
6369 return pinned_plug_of (mark_stack_bos);
6373 BOOL gc_heap::pinned_plug_que_empty_p ()
6375 return (mark_stack_bos == mark_stack_tos);
6379 mark* gc_heap::before_oldest_pin()
6381 if (mark_stack_bos >= 1)
6382 return pinned_plug_of (mark_stack_bos-1);
6388 BOOL gc_heap::ephemeral_pointer_p (uint8_t* o)
6390 return ((o >= ephemeral_low) && (o < ephemeral_high));
6395 int& gc_heap::mark_stack_busy()
6397 return g_mark_stack_busy [(heap_number+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
6401 void gc_heap::make_mark_stack (mark* arr)
6403 reset_pinned_queue();
6404 mark_stack_array = arr;
6405 mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
6407 mark_stack_busy() = 0;
6411 #ifdef BACKGROUND_GC
6413 size_t& gc_heap::bpromoted_bytes(int thread)
6415 #ifdef MULTIPLE_HEAPS
6416 return g_bpromoted [thread*16];
6417 #else //MULTIPLE_HEAPS
6418 UNREFERENCED_PARAMETER(thread);
6420 #endif //MULTIPLE_HEAPS
6423 void gc_heap::make_background_mark_stack (uint8_t** arr)
6425 background_mark_stack_array = arr;
6426 background_mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
6427 background_mark_stack_tos = arr;
6430 void gc_heap::make_c_mark_list (uint8_t** arr)
6433 c_mark_list_index = 0;
6434 c_mark_list_length = 1 + (OS_PAGE_SIZE / MIN_OBJECT_SIZE);
6436 #endif //BACKGROUND_GC
6441 // The card bundle keeps track of groups of card words.
6442 static const size_t card_bundle_word_width = 32;
6444 // How do we express the fact that 32 bits (card_word_width) is one uint32_t?
6445 static const size_t card_bundle_size = (size_t)(GC_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width));
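// Worked numbers, assuming GC_PAGE_SIZE is 4096: card_bundle_size =
// 4096 / (4 * 32) = 32 card words per bundle bit, so a full 32-bit bundle
// word summarizes 32 * 32 = 1024 card words -- one page of card table.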
6448 size_t card_bundle_word (size_t cardb)
6450 return cardb / card_bundle_word_width;
6454 uint32_t card_bundle_bit (size_t cardb)
6456 return (uint32_t)(cardb % card_bundle_word_width);
6459 size_t align_cardw_on_bundle (size_t cardw)
6461 return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 ));
6464 // Get the card bundle representing a card word
6465 size_t cardw_card_bundle (size_t cardw)
6467 return cardw / card_bundle_size;
6470 // Get the first card word in a card bundle
6471 size_t card_bundle_cardw (size_t cardb)
6473 return cardb * card_bundle_size;
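// Hedged example using the 4KB-page value of card_bundle_size (32): card
// word 70 maps to bundle cardw_card_bundle(70) = 2, whose first card word is
// card_bundle_cardw(2) = 64, and align_cardw_on_bundle(70) rounds up to 96,
// the start of the next bundle.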
6476 // Clear the specified card bundle
6477 void gc_heap::card_bundle_clear (size_t cardb)
6479 card_bundle_table [card_bundle_word (cardb)] &= ~(1 << card_bundle_bit (cardb));
6480 dprintf (2, ("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb),
6481 (size_t)card_bundle_cardw (cardb+1)));
6484 void gc_heap::card_bundle_set (size_t cardb)
6486 if (!card_bundle_set_p (cardb))
6488 card_bundle_table [card_bundle_word (cardb)] |= (1 << card_bundle_bit (cardb));
6492 // Set the card bundle bits between start_cardb and end_cardb
6493 void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb)
6495 if (start_cardb == end_cardb)
6497 card_bundle_set(start_cardb);
6501 size_t start_word = card_bundle_word (start_cardb);
6502 size_t end_word = card_bundle_word (end_cardb);
6504 if (start_word < end_word)
6506 // Set the partial words
6507 card_bundle_table [start_word] |= highbits (~0u, card_bundle_bit (start_cardb));
6509 if (card_bundle_bit (end_cardb))
6510 card_bundle_table [end_word] |= lowbits (~0u, card_bundle_bit (end_cardb));
6512 // Set the full words
6513 for (size_t i = start_word + 1; i < end_word; i++)
6514 card_bundle_table [i] = ~0u;
6518 card_bundle_table [start_word] |= (highbits (~0u, card_bundle_bit (start_cardb)) &
6519 lowbits (~0u, card_bundle_bit (end_cardb)));
6523 // Indicates whether the specified bundle is set.
6524 BOOL gc_heap::card_bundle_set_p (size_t cardb)
6526 return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb)));
6529 // Returns the size (in bytes) of a card bundle representing the region from 'from' to 'end'
6530 size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
6532 // Number of heap bytes represented by a card bundle word
6533 size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
6535 // Align the start of the region down
6536 from = (uint8_t*)((size_t)from & ~(cbw_span - 1));
6538 // Align the end of the region up
6539 end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1));
6541 // Make sure they're really aligned
6542 assert (((size_t)from & (cbw_span - 1)) == 0);
6543 assert (((size_t)end & (cbw_span - 1)) == 0);
6545 return ((end - from) / cbw_span) * sizeof (uint32_t);
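// For a sense of scale (assumed values, not a fixed configuration): with
// card_size = 256, card_word_width = 32, card_bundle_size = 32 and
// card_bundle_word_width = 32, cbw_span = 256*32*32*32 = 8MB, i.e. each
// uint32_t of card bundle table accounts for 8MB of heap.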
6548 // Takes a pointer to a card bundle table and an address, and returns a pointer that represents
6549 // where a theoretical card bundle table that represents every address (starting from 0) would
6550 // start if the bundle word representing the address were to be located at the pointer passed in.
6551 // The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle
6552 // for a given address is using a simple shift operation on the address.
6553 uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address)
6555 // The number of bytes of heap memory represented by a card bundle word
6556 const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width;
6558 // Each card bundle word is 32 bits
6559 return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t)));
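// Put differently (sketch): after translation, the bundle word for address A
// is translated_cb[A / heap_bytes_for_bundle_word]; subtracting the
// lowest_address term up front means that divide (a shift in practice) needs
// no base-address adjustment at lookup time.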
6562 void gc_heap::enable_card_bundles ()
6564 if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
6566 dprintf (1, ("Enabling card bundles"));
6568 // We initially set all of the card bundles
6569 card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
6570 cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
6571 settings.card_bundles = TRUE;
6575 BOOL gc_heap::card_bundles_enabled ()
6577 return settings.card_bundles;
6580 #endif // CARD_BUNDLE
6582 #if defined (_TARGET_AMD64_)
6583 #define brick_size ((size_t)4096)
6585 #define brick_size ((size_t)2048)
6586 #endif //_TARGET_AMD64_
6589 size_t gc_heap::brick_of (uint8_t* add)
6591 return (size_t)(add - lowest_address) / brick_size;
6595 uint8_t* gc_heap::brick_address (size_t brick)
6597 return lowest_address + (brick_size * brick);
6601 void gc_heap::clear_brick_table (uint8_t* from, uint8_t* end)
6603 for (size_t i = brick_of (from);i < brick_of (end); i++)
6607 //codes for the brick entries:
6608 //entry == 0 -> not assigned
6609 //entry >0 offset is entry-1
6610 //entry <0 jump back entry bricks
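// Example of the encoding (hedged): a plug starting 100 bytes into brick b is
// recorded by set_brick (b, 100), which stores 101 (offset+1); a brick whose
// enclosing plug began two bricks earlier stores -2, meaning "walk back 2
// bricks"; 0 means the brick was never assigned.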
6614 void gc_heap::set_brick (size_t index, ptrdiff_t val)
6620 assert (val < 32767);
6622 brick_table [index] = (short)val+1;
6624 brick_table [index] = (short)val;
6628 int gc_heap::get_brick_entry (size_t index)
6630 #ifdef MULTIPLE_HEAPS
6631 return VolatileLoadWithoutBarrier(&brick_table [index]);
6633 return brick_table[index];
6639 uint8_t* align_on_brick (uint8_t* add)
6641 return (uint8_t*)((size_t)(add + brick_size - 1) & ~(brick_size - 1));
6645 uint8_t* align_lower_brick (uint8_t* add)
6647 return (uint8_t*)(((size_t)add) & ~(brick_size - 1));
6650 size_t size_brick_of (uint8_t* from, uint8_t* end)
6652 assert (((size_t)from & (brick_size-1)) == 0);
6653 assert (((size_t)end & (brick_size-1)) == 0);
6655 return ((end - from) / brick_size) * sizeof (short);
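// Quick arithmetic check with the brick_size values defined above: a 1MB
// range on _TARGET_AMD64_ spans 1MB/4096 = 256 bricks and so needs
// 256 * sizeof (short) = 512 bytes of brick table; the 2048-byte bricks on
// other targets double that.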
6659 uint8_t* gc_heap::card_address (size_t card)
6661 return (uint8_t*) (card_size * card);
6665 size_t gc_heap::card_of ( uint8_t* object)
6667 return (size_t)(object) / card_size;
6671 size_t gc_heap::card_to_brick (size_t card)
6673 return brick_of (card_address (card));
6677 uint8_t* align_on_card (uint8_t* add)
6679 return (uint8_t*)((size_t)(add + card_size - 1) & ~(card_size - 1 ));
6682 uint8_t* align_on_card_word (uint8_t* add)
6684 return (uint8_t*) ((size_t)(add + (card_size*card_word_width)-1) & ~(card_size*card_word_width - 1));
6688 uint8_t* align_lower_card (uint8_t* add)
6690 return (uint8_t*)((size_t)add & ~(card_size-1));
6694 void gc_heap::clear_card (size_t card)
6696 card_table [card_word (card)] =
6697 (card_table [card_word (card)] & ~(1 << card_bit (card)));
6698 dprintf (3,("Cleared card %Ix [%Ix, %Ix[", card, (size_t)card_address (card),
6699 (size_t)card_address (card+1)));
6703 void gc_heap::set_card (size_t card)
6705 size_t word = card_word (card);
6706 card_table[word] = (card_table [word] | (1 << card_bit (card)));
6708 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
6709 // Also set the card bundle that corresponds to the card
6710 size_t bundle_to_set = cardw_card_bundle(word);
6712 card_bundle_set(bundle_to_set);
6714 dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card, (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set));
6715 assert(card_bundle_set_p(bundle_to_set) != 0);
6720 BOOL gc_heap::card_set_p (size_t card)
6722 return ( card_table [ card_word (card) ] & (1 << card_bit (card)));
6725 // Returns the number of DWORDs in the card table that cover the
6726 // range of addresses [from, end[.
6727 size_t count_card_of (uint8_t* from, uint8_t* end)
6729 return card_word (gcard_of (end - 1)) - card_word (gcard_of (from)) + 1;
6732 // Returns the number of bytes to allocate for a card table
6733 // that covers the range of addresses [from, end[.
6734 size_t size_card_of (uint8_t* from, uint8_t* end)
6736 return count_card_of (from, end) * sizeof(uint32_t);
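// Worked example (illustrative sizes only, not the actual card_size): with
// 256-byte cards and card_word_width == 32, one uint32_t of card table
// covers 8KB of heap, so a 1GB range needs 1GB / 8KB = 131072 card words;
// count_card_of returns that word count and size_card_of the corresponding
// 512KB byte count.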
6739 // We don't store seg_mapping_table in card_table_info because there's always only one view.
6740 class card_table_info
6744 uint8_t* lowest_address;
6745 uint8_t* highest_address;
6746 short* brick_table;
6748 #ifdef CARD_BUNDLE
6749 uint32_t* card_bundle_table;
6750 #endif //CARD_BUNDLE
6752 // mark_array is always at the end of the data structure because we
6753 // want to be able to make one commit call for everything before it.
6755 uint32_t* mark_array;
6757 size_t size;
6759 uint32_t* next_card_table;
6762 //These are accessors on the untranslated card table
6764 unsigned& card_table_refcount (uint32_t* c_table)
6766 return *(unsigned*)((char*)c_table - sizeof (card_table_info));
6770 uint8_t*& card_table_lowest_address (uint32_t* c_table)
6772 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->lowest_address;
6775 uint32_t* translate_card_table (uint32_t* ct)
6777 return (uint32_t*)((uint8_t*)ct - card_word (gcard_of (card_table_lowest_address (ct))) * sizeof(uint32_t));
6781 uint8_t*& card_table_highest_address (uint32_t* c_table)
6783 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->highest_address;
6787 short*& card_table_brick_table (uint32_t* c_table)
6789 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->brick_table;
6794 uint32_t*& card_table_card_bundle_table (uint32_t* c_table)
6796 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->card_bundle_table;
6798 #endif //CARD_BUNDLE
6801 /* Support for mark_array */
6804 uint32_t*& card_table_mark_array (uint32_t* c_table)
6806 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array;
6810 #define mark_bit_pitch ((size_t)16)
6811 #else
6812 #define mark_bit_pitch ((size_t)8)
6814 #define mark_word_width ((size_t)32)
6815 #define mark_word_size (mark_word_width * mark_bit_pitch)
6818 uint8_t* align_on_mark_bit (uint8_t* add)
6820 return (uint8_t*)((size_t)(add + (mark_bit_pitch - 1)) & ~(mark_bit_pitch - 1));
6824 uint8_t* align_lower_mark_bit (uint8_t* add)
6826 return (uint8_t*)((size_t)(add) & ~(mark_bit_pitch - 1));
6830 BOOL is_aligned_on_mark_word (uint8_t* add)
6832 return ((size_t)add == ((size_t)(add) & ~(mark_word_size - 1)));
6836 uint8_t* align_on_mark_word (uint8_t* add)
6838 return (uint8_t*)((size_t)(add + mark_word_size - 1) & ~(mark_word_size - 1));
6842 uint8_t* align_lower_mark_word (uint8_t* add)
6844 return (uint8_t*)((size_t)(add) & ~(mark_word_size - 1));
6848 size_t mark_bit_of (uint8_t* add)
6850 return ((size_t)add / mark_bit_pitch);
6854 unsigned int mark_bit_bit (size_t mark_bit)
6856 return (unsigned int)(mark_bit % mark_word_width);
6860 size_t mark_bit_word (size_t mark_bit)
6862 return (mark_bit / mark_word_width);
6866 size_t mark_word_of (uint8_t* add)
6868 return ((size_t)add) / mark_word_size;
6871 uint8_t* mark_word_address (size_t wd)
6873 return (uint8_t*)(wd*mark_word_size);
6876 uint8_t* mark_bit_address (size_t mark_bit)
6878 return (uint8_t*)(mark_bit*mark_bit_pitch);
6882 size_t mark_bit_bit_of (uint8_t* add)
6884 return (((size_t)add / mark_bit_pitch) % mark_word_width);
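// Worked example (using the smaller mark_bit_pitch of 8 defined above): one
// mark bit covers 8 bytes and one 32-bit mark word covers 8 * 32 = 256
// bytes, so for a hypothetical address 0x1234, mark_word_of gives
// 0x1234 / 256 = 0x12 and mark_bit_bit_of gives (0x1234 / 8) % 32 = 6,
// the bit within that word.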
6888 unsigned int gc_heap::mark_array_marked(uint8_t* add)
6890 return mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add));
6894 BOOL gc_heap::is_mark_bit_set (uint8_t* add)
6896 return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)));
6900 void gc_heap::mark_array_set_marked (uint8_t* add)
6902 size_t index = mark_word_of (add);
6903 uint32_t val = (1 << mark_bit_bit_of (add));
6904 #ifdef MULTIPLE_HEAPS
6905 Interlocked::Or (&(mark_array [index]), val);
6906 #else //MULTIPLE_HEAPS
6907 mark_array [index] |= val;
6912 void gc_heap::mark_array_clear_marked (uint8_t* add)
6914 mark_array [mark_word_of (add)] &= ~(1 << mark_bit_bit_of (add));
6917 size_t size_mark_array_of (uint8_t* from, uint8_t* end)
6919 assert (((size_t)from & ((mark_word_size)-1)) == 0);
6920 assert (((size_t)end & ((mark_word_size)-1)) == 0);
6921 return sizeof (uint32_t)*(((end - from) / mark_word_size));
6924 //In order to eliminate lowest_address from the mark array
6925 //computations (mark_word_of, etc.), mark_array is offset
6926 //according to the lowest_address.
6927 uint32_t* translate_mark_array (uint32_t* ma)
6929 return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
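// This is the same rewinding trick used by translate_card_table and
// translate_card_bundle_table above: back the pointer up by the mark array
// space a [0, g_gc_lowest_address) range would need, so that
// mark_array[mark_word_of (addr)] works without subtracting lowest_address
// on every access.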
6932 // from and end must be page aligned addresses.
6933 void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=TRUE*/
6934 #ifdef FEATURE_BASICFREEZE
6935 , BOOL read_only/*=FALSE*/
6936 #endif // FEATURE_BASICFREEZE
6939 if (!gc_can_use_concurrent)
6940 return;
6942 #ifdef FEATURE_BASICFREEZE
6944 #endif // FEATURE_BASICFREEZE
6946 assert (from == align_on_mark_word (from));
6948 assert (end == align_on_mark_word (end));
6950 #ifdef BACKGROUND_GC
6951 uint8_t* current_lowest_address = background_saved_lowest_address;
6952 uint8_t* current_highest_address = background_saved_highest_address;
6953 #else //BACKGROUND_GC
6954 uint8_t* current_lowest_address = lowest_address;
6955 uint8_t* current_highest_address = highest_address;
6956 #endif //BACKGROUND_GC
6958 //the addresses may be outside of the covered range
6959 //because of a newly allocated large object segment
6961 if ((end <= current_highest_address) && (from >= current_lowest_address))
6963 size_t beg_word = mark_word_of (align_on_mark_word (from));
6964 MAYBE_UNUSED_VAR(beg_word);
6965 //align end word to make sure to cover the address
6966 size_t end_word = mark_word_of (align_on_mark_word (end));
6967 MAYBE_UNUSED_VAR(end_word);
6968 dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
6969 (size_t)mark_word_address (beg_word),
6970 (size_t)mark_word_address (end_word),
6971 (size_t)from, (size_t)end,
6972 (check_only ? "check_only" : "clear")));
6974 uint8_t* op = from;
6976 while (op < mark_word_address (beg_word))
6978 mark_array_clear_marked (op);
6979 op += mark_bit_pitch;
6982 memset (&mark_array[beg_word], 0, (end_word - beg_word)*sizeof (uint32_t));
6987 //Beware: it is assumed that the mark array word straddling
6988 //start has been cleared beforehand.
6989 //Here we verify that the array is empty.
6990 size_t markw = mark_word_of (align_on_mark_word (from));
6991 size_t markw_end = mark_word_of (align_on_mark_word (end));
6992 while (markw < markw_end)
6994 assert (!(mark_array [markw]));
6995 markw++;
6997 uint8_t* p = mark_word_address (markw_end);
6998 while (p < end)
7000 assert (!(mark_array_marked (p)));
7009 //These work on untranslated card tables
7011 uint32_t*& card_table_next (uint32_t* c_table)
7013 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table;
7017 size_t& card_table_size (uint32_t* c_table)
7019 return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size;
7022 void own_card_table (uint32_t* c_table)
7024 card_table_refcount (c_table) += 1;
7027 void destroy_card_table (uint32_t* c_table);
7029 void delete_next_card_table (uint32_t* c_table)
7031 uint32_t* n_table = card_table_next (c_table);
7034 if (card_table_next (n_table))
7036 delete_next_card_table (n_table);
7038 if (card_table_refcount (n_table) == 0)
7040 destroy_card_table (n_table);
7041 card_table_next (c_table) = 0;
7046 void release_card_table (uint32_t* c_table)
7048 assert (card_table_refcount (c_table) >0);
7049 card_table_refcount (c_table) -= 1;
7050 if (card_table_refcount (c_table) == 0)
7052 delete_next_card_table (c_table);
7053 if (card_table_next (c_table) == 0)
7055 destroy_card_table (c_table);
7056 // sever the link from the parent
7057 if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
7059 g_gc_card_table = 0;
7061 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7062 g_gc_card_bundle_table = 0;
7064 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7065 SoftwareWriteWatch::StaticClose();
7066 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7070 uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))];
7073 while (p_table && (card_table_next (p_table) != c_table))
7074 p_table = card_table_next (p_table);
7075 card_table_next (p_table) = 0;
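// Lifetime sketch, as implied by the code above: every heap still using an
// old card table holds a reference taken via own_card_table, and superseded
// tables stay chained through next_card_table so stale translated pointers
// remain valid. When release_card_table drops a refcount to zero, any
// zero-refcount tables further down the chain are destroyed first, and the
// global head pointers are cleared only when the current table itself dies.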
7082 void destroy_card_table (uint32_t* c_table)
7084 // delete (uint32_t*)&card_table_refcount(c_table);
7086 GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table));
7087 dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table)));
7090 uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
7092 assert (g_gc_lowest_address == start);
7093 assert (g_gc_highest_address == end);
7095 uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
7097 size_t bs = size_brick_of (start, end);
7098 size_t cs = size_card_of (start, end);
7100 size_t ms = (gc_can_use_concurrent ?
7101 size_mark_array_of (start, end) :
7102 0);
7104 size_t cb = 0;
7109 #ifdef CARD_BUNDLE
7110 if (can_use_write_watch_for_card_table())
7112 cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
7113 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7114 // If we're not manually managing the card bundles, we will need to use OS write
7115 // watch APIs over this region to track changes.
7116 virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
7119 #endif //CARD_BUNDLE
7120 size_t wws = 0;
7122 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7123 size_t sw_ww_table_offset = 0;
7124 if (gc_can_use_concurrent)
7126 size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
7127 sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
7128 wws = sw_ww_table_offset - sw_ww_size_before_table + SoftwareWriteWatch::GetTableByteSize(start, end);
7130 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7132 #ifdef GROWABLE_SEG_MAPPING_TABLE
7133 size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
7134 size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
7135 size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
7137 st += (st_table_offset_aligned - st_table_offset);
7138 #else //GROWABLE_SEG_MAPPING_TABLE
7139 size_t st = 0;
7140 #endif //GROWABLE_SEG_MAPPING_TABLE
7142 // it is impossible for alloc_size to overflow due to bounds on each of
7143 // its components
7144 size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
7145 uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
7147 if (!mem)
7148 return 0;
7150 dprintf (2, ("Init - Card table alloc for %Id bytes: [%Ix, %Ix[",
7151 alloc_size, (size_t)mem, (size_t)(mem+alloc_size)));
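// Layout of the single reservation (offsets from mem), matching the
// assignments below:
//   [card_table_info][card table: cs][brick table: bs][card bundle table: cb]
//   [software write watch table: wws][seg mapping table: st][mark array: ms]
// Everything before the mark array is committed with one call; the mark
// array is committed separately, per segment, only when background GC
// needs it.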
7153 // mark array will be committed separately (per segment).
7154 size_t commit_size = alloc_size - ms;
7156 if (!virtual_commit (mem, commit_size))
7158 dprintf (1, ("Card table commit failed"));
7159 GCToOSInterface::VirtualRelease (mem, alloc_size);
7160 return 0;
7163 // initialize the ref count
7164 uint32_t* ct = (uint32_t*)(mem+sizeof (card_table_info));
7165 card_table_refcount (ct) = 0;
7166 card_table_lowest_address (ct) = start;
7167 card_table_highest_address (ct) = end;
7168 card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
7169 card_table_size (ct) = alloc_size;
7170 card_table_next (ct) = 0;
7172 #ifdef CARD_BUNDLE
7173 card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
7175 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7176 g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address);
7179 #endif //CARD_BUNDLE
7181 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7182 if (gc_can_use_concurrent)
7184 SoftwareWriteWatch::InitializeUntranslatedTable(mem + sw_ww_table_offset, start);
7186 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7188 #ifdef GROWABLE_SEG_MAPPING_TABLE
7189 seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
7190 seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
7191 size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
7192 #endif //GROWABLE_SEG_MAPPING_TABLE
7195 if (gc_can_use_concurrent)
7196 card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
7197 else
7198 card_table_mark_array (ct) = NULL;
7201 return translate_card_table(ct);
7204 void gc_heap::set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p)
7206 #ifdef MULTIPLE_HEAPS
7207 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
7209 gc_heap* hp = gc_heap::g_heaps [hn];
7210 hp->fgm_result.set_fgm (f, s, loh_p);
7212 #else //MULTIPLE_HEAPS
7213 fgm_result.set_fgm (f, s, loh_p);
7214 #endif //MULTIPLE_HEAPS
7217 //returns 0 for success, -1 otherwise
7218 // We are doing all the decommitting here because we want to make sure we have
7219 // enough memory to do so - if we did this during copy_brick_card_table and
7220 // failed to decommit, it would make the failure case very complicated to
7221 // handle. This way we may waste some decommit work if we call this multiple
7222 // times before the next FGC, but it's easier to handle the failure case.
7223 int gc_heap::grow_brick_card_tables (uint8_t* start,
7226 heap_segment* new_seg,
7230 uint8_t* la = g_gc_lowest_address;
7231 uint8_t* ha = g_gc_highest_address;
7232 uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address);
7233 uint8_t* saved_g_highest_address = max (end, g_gc_highest_address);
7234 seg_mapping* new_seg_mapping_table = nullptr;
7235 #ifdef BACKGROUND_GC
7236 // This value is only for logging purposes - it's not necessarily exactly what we
7237 // would commit for the mark array, but it's close enough for diagnostic purposes.
7238 size_t logging_ma_commit_size = size_mark_array_of (0, (uint8_t*)size);
7239 #endif //BACKGROUND_GC
7241 // See if the address is already covered
7242 if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address))
7245 //modify the highest address so the span covered
7246 //is twice the previous one.
7247 uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit());
7248 // On non-Windows systems, we get only an approximate value that can possibly be
7249 // slightly lower than the saved_g_highest_address.
7250 // In such a case, we set the top to the saved_g_highest_address so that the
7251 // card and brick tables always cover the whole new range.
7252 if (top < saved_g_highest_address)
7254 top = saved_g_highest_address;
7258 if (ps > (uint64_t)200*1024*1024*1024)
7259 ps += (uint64_t)100*1024*1024*1024;
7264 if (saved_g_lowest_address < g_gc_lowest_address)
7266 if (ps > (size_t)g_gc_lowest_address)
7267 saved_g_lowest_address = (uint8_t*)(size_t)OS_PAGE_SIZE;
7268 else
7269 {
7270 assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE);
7271 saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps));
7272 }
7275 if (saved_g_highest_address > g_gc_highest_address)
7277 saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address);
7278 if (saved_g_highest_address > top)
7279 saved_g_highest_address = top;
7282 dprintf (GC_TABLE_LOG, ("Growing card table [%Ix, %Ix[",
7283 (size_t)saved_g_lowest_address,
7284 (size_t)saved_g_highest_address));
7286 bool write_barrier_updated = false;
7287 uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
7288 uint32_t* saved_g_card_table = g_gc_card_table;
7290 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7291 uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table;
7295 uint32_t* translated_ct = 0;
7298 size_t cs = size_card_of (saved_g_lowest_address, saved_g_highest_address);
7299 size_t bs = size_brick_of (saved_g_lowest_address, saved_g_highest_address);
7302 size_t ms = (gc_heap::gc_can_use_concurrent ?
7303 size_mark_array_of (saved_g_lowest_address, saved_g_highest_address) :
7304 0);
7306 size_t cb = 0;
7311 #ifdef CARD_BUNDLE
7312 if (can_use_write_watch_for_card_table())
7314 cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);
7316 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7317 // If we're not manually managing the card bundles, we will need to use OS write
7318 // watch APIs over this region to track changes.
7319 virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
7322 #endif //CARD_BUNDLE
7323 size_t wws = 0;
7325 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7326 size_t sw_ww_table_offset = 0;
7327 if (gc_can_use_concurrent)
7329 size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
7330 sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
7331 wws =
7332 sw_ww_table_offset -
7333 sw_ww_size_before_table +
7334 SoftwareWriteWatch::GetTableByteSize(saved_g_lowest_address, saved_g_highest_address);
7336 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7338 #ifdef GROWABLE_SEG_MAPPING_TABLE
7339 size_t st = size_seg_mapping_table_of (saved_g_lowest_address, saved_g_highest_address);
7340 size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
7341 size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
7342 st += (st_table_offset_aligned - st_table_offset);
7343 #else //GROWABLE_SEG_MAPPING_TABLE
7344 size_t st = 0;
7345 #endif //GROWABLE_SEG_MAPPING_TABLE
7347 // it is impossible for alloc_size to overflow due to bounds on each of
7348 // its components
7349 size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
7350 dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
7351 cs, bs, cb, wws, st, ms));
7353 uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);
7357 set_fgm_result (fgm_grow_table, alloc_size, loh_p);
7361 dprintf (GC_TABLE_LOG, ("Table alloc for %Id bytes: [%Ix, %Ix[",
7362 alloc_size, (size_t)mem, (size_t)((uint8_t*)mem+alloc_size)));
7365 // mark array will be committed separately (per segment).
7366 size_t commit_size = alloc_size - ms;
7368 if (!virtual_commit (mem, commit_size))
7370 dprintf (GC_TABLE_LOG, ("Table commit failed"));
7371 set_fgm_result (fgm_commit_table, commit_size, loh_p);
7376 ct = (uint32_t*)(mem + sizeof (card_table_info));
7377 card_table_refcount (ct) = 0;
7378 card_table_lowest_address (ct) = saved_g_lowest_address;
7379 card_table_highest_address (ct) = saved_g_highest_address;
7380 card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))];
7382 //clear the card table
7384 memclr ((uint8_t*)ct,
7385 (((saved_g_highest_address - saved_g_lowest_address)*sizeof (uint32_t) /
7386 (card_size * card_word_width))
7387 + sizeof (uint32_t)));
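// (One uint32_t of card table covers card_size * card_word_width bytes of
// heap, so the expression above is the number of card table bytes needed
// for the new range, plus one extra word to absorb rounding at the
// boundaries.)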
7390 bt = (short*)((uint8_t*)ct + cs);
7392 // No initialization needed, will be done in copy_brick_card
7394 card_table_brick_table (ct) = bt;
7396 #ifdef CARD_BUNDLE
7397 card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
7398 //set all bundles so we look at all of the cards
7399 memset(card_table_card_bundle_table (ct), 0xFF, cb);
7400 #endif //CARD_BUNDLE
7402 #ifdef GROWABLE_SEG_MAPPING_TABLE
7404 new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
7405 new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
7406 size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
7407 memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
7408 &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
7409 size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
7411 // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
7412 // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
7413 // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
7414 // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
7415 // if an OOM occurs.
7417 #endif //GROWABLE_SEG_MAPPING_TABLE
7420 if (gc_can_use_concurrent)
7421 card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
7422 else
7423 card_table_mark_array (ct) = NULL;
7426 translated_ct = translate_card_table (ct);
7428 dprintf (GC_TABLE_LOG, ("card table: %Ix(translated: %Ix), seg map: %Ix, mark array: %Ix",
7429 (size_t)ct, (size_t)translated_ct, (size_t)new_seg_mapping_table, (size_t)card_table_mark_array (ct)));
7431 #ifdef BACKGROUND_GC
7432 if (hp->should_commit_mark_array())
7434 dprintf (GC_TABLE_LOG, ("new low: %Ix, new high: %Ix, latest mark array is %Ix(translate: %Ix)",
7435 saved_g_lowest_address, saved_g_highest_address,
7436 card_table_mark_array (ct),
7437 translate_mark_array (card_table_mark_array (ct))));
7438 uint32_t* new_mark_array = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, saved_g_lowest_address));
7439 if (!commit_new_mark_array_global (new_mark_array))
7441 dprintf (GC_TABLE_LOG, ("failed to commit portions in the mark array for existing segments"));
7442 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7446 if (!commit_mark_array_new_seg (hp, new_seg, translated_ct, saved_g_lowest_address))
7448 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg"));
7449 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7455 clear_commit_flag_global();
7457 #endif //BACKGROUND_GC
7459 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7460 if (gc_can_use_concurrent)
7462 // The current design of software write watch requires that the runtime is suspended during resize. Suspending
7463 // on resize is preferred because it is a far less frequent operation than GetWriteWatch() / ResetWriteWatch().
7464 // Suspending here allows copying dirty state from the old table into the new table, and not have to merge old
7465 // table info lazily as done for card tables.
7467 // Either this thread was the thread that did the suspension which means we are suspended; or this is called
7468 // from a GC thread which means we are in a blocking GC and also suspended.
7469 bool is_runtime_suspended = GCToEEInterface::IsGCThread();
7470 if (!is_runtime_suspended)
7472 // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the
7473 // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call.
7474 // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state
7475 // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and
7476 // g_gc_highest_address.
7480 g_gc_card_table = translated_ct;
7482 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7483 g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
7486 SoftwareWriteWatch::SetResizedUntranslatedTable(
7487 mem + sw_ww_table_offset,
7488 saved_g_lowest_address,
7489 saved_g_highest_address);
7491 seg_mapping_table = new_seg_mapping_table;
7493 // Since the runtime is already suspended, update the write barrier here as well.
7494 // This passes a bool telling whether we need to switch to the post
7495 // grow version of the write barrier. This test tells us if the new
7496 // segment was allocated at a lower address than the old, requiring
7497 // that we start doing an upper bounds check in the write barrier.
7498 g_gc_lowest_address = saved_g_lowest_address;
7499 g_gc_highest_address = saved_g_highest_address;
7500 stomp_write_barrier_resize(true, la != saved_g_lowest_address);
7501 write_barrier_updated = true;
7503 if (!is_runtime_suspended)
7509 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
7511 g_gc_card_table = translated_ct;
7513 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7514 g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
7518 if (!write_barrier_updated)
7520 seg_mapping_table = new_seg_mapping_table;
7521 GCToOSInterface::FlushProcessWriteBuffers();
7522 g_gc_lowest_address = saved_g_lowest_address;
7523 g_gc_highest_address = saved_g_highest_address;
7525 // This passes a bool telling whether we need to switch to the post
7526 // grow version of the write barrier. This test tells us if the new
7527 // segment was allocated at a lower address than the old, requiring
7528 // that we start doing an upper bounds check in the write barrier.
7529 // This will also suspend the runtime if the write barrier type needs
7530 // to be changed, so we are doing this after all global state has
7531 // been updated. See the comment above suspend_EE() for more details.
7533 stomp_write_barrier_resize(GCToEEInterface::IsGCThread(), la != saved_g_lowest_address);
7539 //clean up the mess and return -1;
7543 assert(g_gc_card_table == saved_g_card_table);
7545 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7546 assert(g_gc_card_bundle_table == saved_g_card_bundle_table);
7549 //delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
7550 if (!GCToOSInterface::VirtualRelease (mem, alloc_size))
7552 dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
7553 assert (!"release failed");
7561 #ifdef BACKGROUND_GC
7562 if (hp->should_commit_mark_array())
7564 dprintf (GC_TABLE_LOG, ("in range new seg %Ix, mark_array is %Ix", new_seg, hp->mark_array));
7565 if (!commit_mark_array_new_seg (hp, new_seg))
7567 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg in range"));
7568 set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
7572 #endif //BACKGROUND_GC
7578 //copy all of the arrays managed by the card table for a page-aligned range
7579 void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
7580 short* old_brick_table,
7582 uint8_t* start, uint8_t* end)
7584 ptrdiff_t brick_offset = brick_of (start) - brick_of (la);
7587 dprintf (2, ("copying tables for range [%Ix %Ix[", (size_t)start, (size_t)end));
7590 short* brick_start = &brick_table [brick_of (start)];
7591 if (old_brick_table)
7593 // segments are always on page boundaries
7594 memcpy (brick_start, &old_brick_table[brick_offset],
7595 size_brick_of (start, end));
7600 // This is a large heap, just clear the brick table
7603 uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
7605 #ifdef BACKGROUND_GC
7606 UNREFERENCED_PARAMETER(seg);
7607 if (recursive_gc_sync::background_running_p())
7609 uint32_t* old_mark_array = card_table_mark_array (old_ct);
7611 // We don't need to go through all the card tables here because
7612 // we only need to copy from the GC version of the mark array - when we
7613 // mark (even in allocate_large_object) we always use that mark array.
7614 if ((card_table_highest_address (old_ct) >= start) &&
7615 (card_table_lowest_address (old_ct) <= end))
7617 if ((background_saved_highest_address >= start) &&
7618 (background_saved_lowest_address <= end))
7620 //copy the mark bits
7621 // segments are always on page boundaries
7622 uint8_t* m_start = max (background_saved_lowest_address, start);
7623 uint8_t* m_end = min (background_saved_highest_address, end);
7624 memcpy (&mark_array[mark_word_of (m_start)],
7625 &old_mark_array[mark_word_of (m_start) - mark_word_of (la)],
7626 size_mark_array_of (m_start, m_end));
7631 //only large segments can be out of range
7632 assert (old_brick_table == 0);
7635 #else //BACKGROUND_GC
7637 clear_mark_array (start, heap_segment_committed(seg));
7638 #endif //BACKGROUND_GC
7641 // n-way merge with all of the card tables ever used in between
7642 uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]);
7645 while (card_table_next (old_ct) != ct)
7647 //copy if old card table contained [start, end[
7648 if ((card_table_highest_address (ct) >= end) &&
7649 (card_table_lowest_address (ct) <= start))
7651 // OR the card_tables together
7653 size_t start_word = card_word (card_of (start));
7655 uint32_t* dest = &card_table[start_word];
7656 uint32_t* src = &((translate_card_table (ct))[start_word]);
7657 ptrdiff_t count = count_card_of (start, end);
7658 for (int x = 0; x < count; x++)
7662 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
7665 card_bundle_set(cardw_card_bundle(start_word+x));
7673 ct = card_table_next (ct);
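// Illustrative example of the merge above (hypothetical card words): if two
// superseded card tables recorded 0x00000011 and 0x01000000 for the same
// card word, the new table ends up with 0x01000011 - a card stays dirty if
// any table in the chain ever saw it dirty, which is the conservative (and
// therefore safe) direction for card marking.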
7677 //initialize all of the arrays managed by the card table for a page-aligned range when an existing ro segment comes into range
7678 void gc_heap::init_brick_card_range (heap_segment* seg)
7680 dprintf (2, ("initialising tables for range [%Ix %Ix[",
7681 (size_t)heap_segment_mem (seg),
7682 (size_t)heap_segment_allocated (seg)));
7684 // initialize the brick table
7685 for (size_t b = brick_of (heap_segment_mem (seg));
7686 b < brick_of (align_on_brick (heap_segment_allocated (seg)));
7693 if (recursive_gc_sync::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
7696 clear_mark_array (heap_segment_mem (seg), heap_segment_committed(seg));
7700 clear_card_for_addresses (heap_segment_mem (seg),
7701 heap_segment_allocated (seg));
7704 void gc_heap::copy_brick_card_table()
7706 uint8_t* la = lowest_address;
7707 uint8_t* ha = highest_address;
7708 MAYBE_UNUSED_VAR(ha);
7709 uint32_t* old_card_table = card_table;
7710 short* old_brick_table = brick_table;
7712 assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))]));
7713 assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
7715 /* todo: Need a global lock for this */
7716 uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
7717 own_card_table (ct);
7718 card_table = translate_card_table (ct);
7719 /* End of global lock */
7720 highest_address = card_table_highest_address (ct);
7721 lowest_address = card_table_lowest_address (ct);
7723 brick_table = card_table_brick_table (ct);
7726 if (gc_can_use_concurrent)
7728 mark_array = translate_mark_array (card_table_mark_array (ct));
7729 assert (mark_word_of (g_gc_highest_address) ==
7730 mark_word_of (align_on_mark_word (g_gc_highest_address)));
7737 #if defined(MARK_ARRAY) && defined(_DEBUG)
7738 size_t cb_end = (size_t)((uint8_t*)card_table_card_bundle_table (ct) + size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address));
7739 #ifdef GROWABLE_SEG_MAPPING_TABLE
7740 size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
7741 size_t cb_end_aligned = align_for_seg_mapping_table (cb_end);
7742 st += (cb_end_aligned - cb_end);
7743 #else //GROWABLE_SEG_MAPPING_TABLE
7745 #endif //GROWABLE_SEG_MAPPING_TABLE
7746 #endif //MARK_ARRAY && _DEBUG
7747 card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
7749 // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
7750 // start of the untranslated table.
7751 assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
7752 card_table_card_bundle_table (ct));
7754 //set the card bundles if we are in a heap growth scenario
7755 if (card_bundles_enabled())
7757 card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
7758 cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
7760 //check if we need to turn on card_bundles.
7761 #ifdef MULTIPLE_HEAPS
7762 // use INT64 arithmetic here because of possible overflow on 32-bit platforms
7763 uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*gc_heap::n_heaps;
7764 #else
7765 // use INT64 arithmetic here because of possible overflow on 32-bit platforms
7766 uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
7767 #endif //MULTIPLE_HEAPS
7768 if (reserved_memory >= th)
7770 enable_card_bundles();
7773 #endif //CARD_BUNDLE
7775 // for each of the segments and heaps, copy the brick table
7776 // and/or the card table
7777 heap_segment* seg = generation_start_segment (generation_of (max_generation));
7780 if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
7782 //check if it came into range
7783 if ((heap_segment_reserved (seg) > lowest_address) &&
7784 (heap_segment_mem (seg) < highest_address))
7786 set_ro_segment_in_range (seg);
7792 uint8_t* end = align_on_page (heap_segment_allocated (seg));
7793 copy_brick_card_range (la, old_card_table,
7796 align_lower_page (heap_segment_mem (seg)),
7799 seg = heap_segment_next (seg);
7802 seg = generation_start_segment (large_object_generation);
7805 if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
7807 //check if it came into range
7808 if ((heap_segment_reserved (seg) > lowest_address) &&
7809 (heap_segment_mem (seg) < highest_address))
7811 set_ro_segment_in_range (seg);
7816 uint8_t* end = align_on_page (heap_segment_allocated (seg));
7817 copy_brick_card_range (la, old_card_table,
7820 align_lower_page (heap_segment_mem (seg)),
7823 seg = heap_segment_next (seg);
7826 release_card_table (&old_card_table[card_word (card_of(la))]);
7829 #ifdef FEATURE_BASICFREEZE
7830 BOOL gc_heap::insert_ro_segment (heap_segment* seg)
7832 enter_spin_lock (&gc_heap::gc_lock);
7834 if (!gc_heap::seg_table->ensure_space_for_insert ()
7835 || (should_commit_mark_array() && !commit_mark_array_new_seg(__this, seg)))
7837 leave_spin_lock(&gc_heap::gc_lock);
7841 //insert at the head of the segment list
7842 generation* gen2 = generation_of (max_generation);
7843 heap_segment* oldhead = generation_start_segment (gen2);
7844 heap_segment_next (seg) = oldhead;
7845 generation_start_segment (gen2) = seg;
7847 seg_table->insert (heap_segment_mem(seg), (size_t)seg);
7849 #ifdef SEG_MAPPING_TABLE
7850 seg_mapping_table_add_ro_segment (seg);
7851 #endif //SEG_MAPPING_TABLE
7854 if ((heap_segment_reserved (seg) > lowest_address) &&
7855 (heap_segment_mem (seg) < highest_address))
7857 set_ro_segment_in_range (seg);
7860 FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_read_only_heap);
7862 leave_spin_lock (&gc_heap::gc_lock);
7866 // No one is calling this function right now. If this is getting called we need
7867 // to take care of decommitting the mark array for it - we will need to remember
7868 // which portion of the mark array was committed and only decommit that.
7869 void gc_heap::remove_ro_segment (heap_segment* seg)
7871 //clear the mark bits so a new segment allocated in its place will have clear mark bits
7873 if (gc_can_use_concurrent)
7875 clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)),
7876 align_on_card_word (min (heap_segment_allocated (seg), highest_address)),
7877 false); // read_only segments need the mark clear
7881 enter_spin_lock (&gc_heap::gc_lock);
7883 seg_table->remove ((uint8_t*)seg);
7885 #ifdef SEG_MAPPING_TABLE
7886 seg_mapping_table_remove_ro_segment (seg);
7887 #endif //SEG_MAPPING_TABLE
7889 // Locate segment (and previous segment) in the list.
7890 generation* gen2 = generation_of (max_generation);
7891 heap_segment* curr_seg = generation_start_segment (gen2);
7892 heap_segment* prev_seg = NULL;
7894 while (curr_seg && curr_seg != seg)
7896 prev_seg = curr_seg;
7897 curr_seg = heap_segment_next (curr_seg);
7899 assert (curr_seg == seg);
7901 // Patch previous segment (or list head if there is none) to skip the removed segment.
7902 if (prev_seg)
7903 heap_segment_next (prev_seg) = heap_segment_next (curr_seg);
7904 else
7905 generation_start_segment (gen2) = heap_segment_next (curr_seg);
7907 leave_spin_lock (&gc_heap::gc_lock);
7909 #endif //FEATURE_BASICFREEZE
7911 BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg)
7914 seg->flags |= heap_segment_flags_inrange;
7915 // init_brick_card_range (seg);
7916 ro_segments_in_range = TRUE;
7917 //right now, segments aren't protected
7918 //unprotect_segment (seg);
7924 uint8_t** make_mark_list (size_t size)
7926 uint8_t** mark_list = new (nothrow) uint8_t* [size];
7930 #define swap(a,b){uint8_t* t; t = a; a = b; b = t;}
7932 void verify_qsort_array (uint8_t* *low, uint8_t* *high)
7936 for (i = low+1; i <= high; i++)
7945 #ifndef USE_INTROSORT
7946 void qsort1( uint8_t* *low, uint8_t* *high, unsigned int depth)
7948 if (((low + 16) >= high) || (depth > 100))
7952 for (i = low+1; i <= high; i++)
7955 for (j=i;j >low && val<*(j-1);j--)
7964 uint8_t *pivot, **left, **right;
7966 //sort low middle and high
7967 if (*(low+((high-low)/2)) < *low)
7968 swap (*(low+((high-low)/2)), *low);
7971 if (*high < *(low+((high-low)/2)))
7972 swap (*(low+((high-low)/2)), *high);
7974 swap (*(low+((high-low)/2)), *(high-1));
7976 left = low; right = high-1;
7978 while (*(--right) > pivot);
7979 while (*(++left) < pivot);
7982 swap(*left, *right);
7987 swap (*left, *(high-1));
7988 qsort1(low, left-1, depth+1);
7989 qsort1(left+1, high, depth+1);
7992 #endif //USE_INTROSORT
7993 void rqsort1( uint8_t* *low, uint8_t* *high)
7995 if ((low + 16) >= high)
7999 for (i = low+1; i <= high; i++)
8002 for (j=i;j >low && val>*(j-1);j--)
8011 uint8_t *pivot, **left, **right;
8013 //sort low middle and high
8014 if (*(low+((high-low)/2)) > *low)
8015 swap (*(low+((high-low)/2)), *low);
8018 if (*high > *(low+((high-low)/2)))
8019 swap (*(low+((high-low)/2)), *high);
8021 swap (*(low+((high-low)/2)), *(high-1));
8023 left = low; right = high-1;
8025 while (*(--right) < pivot);
8026 while (*(++left) > pivot);
8029 swap(*left, *right);
8034 swap (*left, *(high-1));
8035 rqsort1(low, left-1);
8036 rqsort1(left+1, high);
8040 #ifdef USE_INTROSORT
8045 static const int size_threshold = 64;
8046 static const int max_depth = 100;
8049 inline static void swap_elements(uint8_t** i,uint8_t** j)
8057 static void sort (uint8_t** begin, uint8_t** end, int ignored)
8060 introsort_loop (begin, end, max_depth);
8061 insertionsort (begin, end);
8066 static void introsort_loop (uint8_t** lo, uint8_t** hi, int depth_limit)
8068 while (hi-lo >= size_threshold)
8070 if (depth_limit == 0)
8071 {
8072 heapsort (lo, hi);
8073 return;
8074 }
8075 uint8_t** p=median_partition (lo, hi);
8076 depth_limit=depth_limit-1;
8077 introsort_loop (p, hi, depth_limit);
8078 hi = p-1;
8082 static uint8_t** median_partition (uint8_t** low, uint8_t** high)
8084 uint8_t *pivot, **left, **right;
8086 //sort low middle and high
8087 if (*(low+((high-low)/2)) < *low)
8088 swap_elements ((low+((high-low)/2)), low);
8090 swap_elements (low, high);
8091 if (*high < *(low+((high-low)/2)))
8092 swap_elements ((low+((high-low)/2)), high);
8094 swap_elements ((low+((high-low)/2)), (high-1));
8096 left = low; right = high-1;
8098 while (*(--right) > pivot);
8099 while (*(++left) < pivot);
8102 swap_elements(left, right);
8107 swap_elements (left, (high-1));
8112 static void insertionsort (uint8_t** lo, uint8_t** hi)
8114 for (uint8_t** i=lo+1; i <= hi; i++)
8118 while((j > lo) && (t <*(j-1)))
8127 static void heapsort (uint8_t** lo, uint8_t** hi)
8129 size_t n = hi - lo + 1;
8130 for (size_t i=n / 2; i >= 1; i--)
8134 for (size_t i = n; i > 1; i--)
8136 swap_elements (lo, lo + i - 1);
8137 downheap(1, i - 1, lo);
8141 static void downheap (size_t i, size_t n, uint8_t** lo)
8143 uint8_t* d = *(lo + i - 1);
8148 if (child < n && *(lo + child - 1)<(*(lo + child)))
8152 if (!(d<*(lo + child - 1)))
8156 *(lo + i - 1) = *(lo + child - 1);
8164 #endif //USE_INTROSORT
8166 #ifdef MULTIPLE_HEAPS
8167 #ifdef PARALLEL_MARK_LIST_SORT
8168 void gc_heap::sort_mark_list()
8170 // if this heap had a mark list overflow, we don't do anything
8171 if (mark_list_index > mark_list_end)
8173 // printf("sort_mark_list: overflow on heap %d\n", heap_number);
8177 // if any other heap had a mark list overflow, we fake one too,
8178 // so we don't use an incomplete mark list by mistake
8179 for (int i = 0; i < n_heaps; i++)
8181 if (g_heaps[i]->mark_list_index > g_heaps[i]->mark_list_end)
8183 mark_list_index = mark_list_end + 1;
8184 // printf("sort_mark_list: overflow on heap %d\n", i);
8189 // unsigned long start = GetCycleCount32();
8191 dprintf (3, ("Sorting mark lists"));
8192 if (mark_list_index > mark_list)
8193 _sort (mark_list, mark_list_index - 1, 0);
8195 // printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list);
8196 // start = GetCycleCount32();
8198 // first set the pieces for all heaps to empty
8200 for (heap_num = 0; heap_num < n_heaps; heap_num++)
8202 mark_list_piece_start[heap_num] = NULL;
8203 mark_list_piece_end[heap_num] = NULL;
8206 uint8_t** x = mark_list;
8208 // predicate means: x is still within the mark list, and within the bounds of this heap
8209 #define predicate(x) (((x) < mark_list_index) && (*(x) < heap->ephemeral_high))
8212 while (x < mark_list_index)
8215 // find the heap x points into - searching cyclically from the last heap,
8216 // because in many cases the right heap is the next one or comes soon after
8217 int last_heap_num = heap_num;
8218 MAYBE_UNUSED_VAR(last_heap_num);
8222 if (heap_num >= n_heaps)
8223 heap_num = 0;
8224 assert(heap_num != last_heap_num); // we should always find the heap - infinite loop if not!
8225 heap = g_heaps[heap_num];
8227 while (!(*x >= heap->ephemeral_low && *x < heap->ephemeral_high));
8229 // x is the start of the mark list piece for this heap
8230 mark_list_piece_start[heap_num] = x;
8232 // to find the end of the mark list piece for this heap, find the first x
8233 // that has !predicate(x), i.e. that is either not in this heap, or beyond the end of the list
8236 // let's see if we get lucky and the whole rest belongs to this piece
8237 if (predicate(mark_list_index-1))
8239 x = mark_list_index;
8240 mark_list_piece_end[heap_num] = x;
8244 // we play a variant of binary search to find the point sooner.
8245 // the first loop advances by increasing steps until the predicate turns false.
8246 // then we retreat the last step, and the second loop advances by decreasing steps, keeping the predicate true.
8251 uint8_t** temp_x = x;
8258 while (predicate(x));
8259 // we know that only the last step was wrong, so we undo it
8263 // loop invariant - predicate holds at x, but not x + inc
8264 assert (predicate(x) && !(((x + inc) > x) && predicate(x + inc)));
8266 if (((x + inc) > x) && predicate(x + inc))
8272 // the termination condition and the loop invariant together imply this:
8273 assert(predicate(x) && !predicate(x + inc) && (inc == 1));
8274 // so the spot we're looking for is one further
8277 mark_list_piece_end[heap_num] = x;
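// Illustrative trace of the two loops above (hypothetical): inc doubles
// 1, 2, 4, 8 until the predicate fails at +8, and the last step is undone,
// leaving us at +4; the second loop then tries +2 and +1, keeping only the
// steps where the predicate still holds, until inc == 1 pins the boundary
// to within one slot - O(log n) probes instead of a linear scan.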
8282 // printf("second phase of sort_mark_list for heap %d took %u cycles\n", this->heap_number, GetCycleCount32() - start);
8285 void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end)
8287 size_t slots_needed = end - start;
8288 size_t slots_available = mark_list_end + 1 - mark_list_index;
8289 size_t slots_to_copy = min(slots_needed, slots_available);
8290 memcpy(mark_list_index, start, slots_to_copy*sizeof(*start));
8291 mark_list_index += slots_to_copy;
8292 // printf("heap %d: appended %Id slots to mark_list\n", heap_number, slots_to_copy);
8295 void gc_heap::merge_mark_lists()
8297 uint8_t** source[MAX_SUPPORTED_CPUS];
8298 uint8_t** source_end[MAX_SUPPORTED_CPUS];
8299 int source_heap[MAX_SUPPORTED_CPUS];
8300 int source_count = 0;
8302 // in case of mark list overflow, don't bother
8303 if (mark_list_index > mark_list_end)
8305 // printf("merge_mark_lists: overflow\n");
8309 dprintf(3, ("merge_mark_lists: heap_number = %d starts out with %Id entries", heap_number, mark_list_index - mark_list));
8310 // unsigned long start = GetCycleCount32();
8311 for (int i = 0; i < n_heaps; i++)
8313 gc_heap* heap = g_heaps[i];
8314 if (heap->mark_list_piece_start[heap_number] < heap->mark_list_piece_end[heap_number])
8316 source[source_count] = heap->mark_list_piece_start[heap_number];
8317 source_end[source_count] = heap->mark_list_piece_end[heap_number];
8318 source_heap[source_count] = i;
8319 if (source_count < MAX_SUPPORTED_CPUS)
8321 source_count++;
8323 // printf("first phase of merge_mark_lists for heap %d took %u cycles\n", heap_number, GetCycleCount32() - start);
8325 dprintf(3, ("heap_number = %d has %d sources\n", heap_number, source_count));
8326 #if defined(_DEBUG) || defined(TRACE_GC)
8327 for (int j = 0; j < source_count; j++)
8329 dprintf(3, ("heap_number = %d ", heap_number));
8330 dprintf(3, (" source from heap %d = %Ix .. %Ix (%Id entries)",
8331 (size_t)(source_heap[j]), (size_t)(source[j][0]), (size_t)(source_end[j][-1]), (size_t)(source_end[j] - source[j])));
8332 // the sources should all be sorted
8333 for (uint8_t **x = source[j]; x < source_end[j] - 1; x++)
8337 dprintf(3, ("oops, mark_list from source %d for heap %d isn't sorted\n", j, heap_number));
8342 #endif //_DEBUG || TRACE_GC
8344 // start = GetCycleCount32();
8346 mark_list = &g_mark_list_copy [heap_number*mark_list_size];
8347 mark_list_index = mark_list;
8348 mark_list_end = &mark_list [mark_list_size-1];
8349 int piece_count = 0;
8350 if (source_count == 0)
8354 else if (source_count == 1)
8356 mark_list = source[0];
8357 mark_list_index = source_end[0];
8358 mark_list_end = mark_list_index;
8363 while (source_count > 1)
8365 // find the lowest and second lowest value in the sources we're merging from
8366 int lowest_source = 0;
8367 uint8_t *lowest = *source[0];
8368 uint8_t *second_lowest = *source[1];
8369 for (int i = 1; i < source_count; i++)
8371 if (lowest > *source[i])
8373 second_lowest = lowest;
8374 lowest = *source[i];
8375 lowest_source = i;
8377 else if (second_lowest > *source[i])
8379 second_lowest = *source[i];
8383 // find the point in the lowest source where it either runs out or is not <= second_lowest anymore
8385 // let's first try to get lucky and see if the whole source is <= second_lowest -- this is actually quite common
8387 if (source_end[lowest_source][-1] <= second_lowest)
8388 x = source_end[lowest_source];
8391 // use linear search to find the end -- could also use binary search as in sort_mark_list,
8392 // but saw no improvement doing that
8393 for (x = source[lowest_source]; x < source_end[lowest_source] && *x <= second_lowest; x++)
8397 // blast this piece to the mark list
8398 append_to_mark_list(source[lowest_source], x);
8401 source[lowest_source] = x;
8403 // check whether this source is now exhausted
8404 if (x >= source_end[lowest_source])
8406 // if it's not the source with the highest index, copy the source with the highest index
8407 // over it so the non-empty sources are always at the beginning
8408 if (lowest_source < source_count-1)
8410 source[lowest_source] = source[source_count-1];
8411 source_end[lowest_source] = source_end[source_count-1];
8413 source_count--;
8416 // we're left with just one source that we copy
8417 append_to_mark_list(source[0], source_end[0]);
8421 // printf("second phase of merge_mark_lists for heap %d took %u cycles to merge %d pieces\n", heap_number, GetCycleCount32() - start, piece_count);
8423 #if defined(_DEBUG) || defined(TRACE_GC)
8424 // the final mark list must be sorted
8425 for (uint8_t **x = mark_list; x < mark_list_index - 1; x++)
8429 dprintf(3, ("oops, mark_list for heap %d isn't sorted at the end of merge_mark_lists", heap_number));
8433 #endif //defined(_DEBUG) || defined(TRACE_GC)
8435 #else //PARALLEL_MARK_LIST_SORT
8436 void gc_heap::combine_mark_lists()
8438 dprintf (3, ("Combining mark lists"));
8439 //verify if a heap has overflowed its mark list
8440 BOOL use_mark_list = TRUE;
8441 for (int i = 0; i < n_heaps; i++)
8443 if (g_heaps [i]->mark_list_index > g_heaps [i]->mark_list_end)
8445 use_mark_list = FALSE;
8452 dprintf (3, ("Using mark list"));
8453 //compact the gaps out of the mark list
8455 uint8_t** current_gap = g_heaps [gn]->mark_list_index;
8456 uint8_t** current_gap_end = g_heaps[gn]->mark_list_end + 1;
8457 uint8_t** dst_last = current_gap-1;
8459 int srcn = n_heaps-1;
8460 gc_heap* srch = g_heaps [srcn];
8461 uint8_t** src = srch->mark_list_index - 1;
8462 uint8_t** src_beg = srch->mark_list;
8464 while (current_gap <= src)
8466 while ((gn < n_heaps-1) && (current_gap >= current_gap_end))
8468 //go to the next gap
8470 dprintf (3, ("Going to the next gap %d", gn));
8471 assert (gn < n_heaps);
8472 current_gap = g_heaps [gn]->mark_list_index;
8473 current_gap_end = g_heaps[gn]->mark_list_end + 1;
8474 assert ((gn == (n_heaps-1)) || (current_gap_end == g_heaps[gn+1]->mark_list));
8476 while ((srcn > 0) && (src < src_beg))
8478 //go to the previous source
8480 dprintf (3, ("going to the previous source %d", srcn));
8482 gc_heap* srch = g_heaps [srcn];
8483 src = srch->mark_list_index - 1;
8484 src_beg = srch->mark_list;
8486 if (current_gap < src)
8488 dst_last = current_gap;
8489 *current_gap++ = *src--;
8492 dprintf (3, ("src: %Ix dst_last: %Ix", (size_t)src, (size_t)dst_last));
8494 uint8_t** end_of_list = max (src, dst_last);
8496 //sort the resulting compacted list
8497 assert (end_of_list < &g_mark_list [n_heaps*mark_list_size]);
8498 if (end_of_list > &g_mark_list[0])
8499 _sort (&g_mark_list[0], end_of_list, 0);
8500 //adjust the mark_list to the beginning of the resulting mark list.
8501 for (int i = 0; i < n_heaps; i++)
8503 g_heaps [i]->mark_list = g_mark_list;
8504 g_heaps [i]->mark_list_index = end_of_list + 1;
8505 g_heaps [i]->mark_list_end = end_of_list + 1;
8510 uint8_t** end_of_list = g_mark_list;
8511 //adjust the mark_list to the beginning of the resulting mark list.
8512 //put the index beyond the end to turn off mark list processing
8513 for (int i = 0; i < n_heaps; i++)
8515 g_heaps [i]->mark_list = g_mark_list;
8516 g_heaps [i]->mark_list_index = end_of_list + 1;
8517 g_heaps [i]->mark_list_end = end_of_list;
8521 #endif // PARALLEL_MARK_LIST_SORT
8522 #endif //MULTIPLE_HEAPS
8525 class seg_free_spaces
8527 struct seg_free_space
8533 struct free_space_bucket
8535 seg_free_space* free_space;
8536 ptrdiff_t count_add; // Assigned when we first construct the array.
8537 ptrdiff_t count_fit; // How many items left when we are fitting plugs.
8540 void move_bucket (int old_power2, int new_power2)
8542 // PREFAST warning 22015: old_power2 could be negative
8543 assert (old_power2 >= 0);
8544 assert (old_power2 >= new_power2);
8546 if (old_power2 == new_power2)
8551 seg_free_space* src_index = free_space_buckets[old_power2].free_space;
8552 for (int i = old_power2; i > new_power2; i--)
8554 seg_free_space** dest = &(free_space_buckets[i].free_space);
8557 seg_free_space* dest_index = free_space_buckets[i - 1].free_space;
8558 if (i > (new_power2 + 1))
8560 seg_free_space temp = *src_index;
8561 *src_index = *dest_index;
8564 src_index = dest_index;
8567 free_space_buckets[old_power2].count_fit--;
8568 free_space_buckets[new_power2].count_fit++;
8573 void dump_free_space (seg_free_space* item)
8575 uint8_t* addr = 0;
8576 size_t len = 0;
8578 if (item->is_plug)
8580 mark* m = (mark*)(item->start);
8581 len = pinned_len (m);
8582 addr = pinned_plug (m) - len;
8584 else
8586 heap_segment* seg = (heap_segment*)(item->start);
8587 addr = heap_segment_plan_allocated (seg);
8588 len = heap_segment_committed (seg) - addr;
8591 dprintf (SEG_REUSE_LOG_1, ("[%d]0x%Ix %Id", heap_num, addr, len));
8596 seg_free_space* item = NULL;
8599 dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------\nnow the free spaces look like:", heap_num));
8600 for (i = 0; i < (free_space_bucket_count - 1); i++)
8602 dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
8603 dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
8604 item = free_space_buckets[i].free_space;
8605 while (item < free_space_buckets[i + 1].free_space)
8607 dump_free_space (item);
8610 dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
8613 dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
8614 dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
8615 item = free_space_buckets[i].free_space;
8617 while (item <= &seg_free_space_array[free_space_item_count - 1])
8619 dump_free_space (item);
8622 dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
8627 free_space_bucket* free_space_buckets;
8628 seg_free_space* seg_free_space_array;
8629 ptrdiff_t free_space_bucket_count;
8630 ptrdiff_t free_space_item_count;
8634 BOOL has_end_of_seg;
8639 seg_free_spaces (int h_number)
8641 heap_num = h_number;
8646 size_t total_prealloc_size =
8647 MAX_NUM_BUCKETS * sizeof (free_space_bucket) +
8648 MAX_NUM_FREE_SPACES * sizeof (seg_free_space);
8650 free_space_buckets = (free_space_bucket*) new (nothrow) uint8_t[total_prealloc_size];
8652 return (!!free_space_buckets);
8655 // We take the ordered free space array we got from the 1st pass,
8656 // and feed the portion that we decided to use to this method, i.e.,
8657 // the largest item_count free spaces.
8658 void add_buckets (int base, size_t* ordered_free_spaces, int bucket_count, size_t item_count)
8660 assert (free_space_buckets);
8661 assert (item_count <= (size_t)MAX_PTR);
8663 free_space_bucket_count = bucket_count;
8664 free_space_item_count = item_count;
8667 has_end_of_seg = FALSE;
8670 ptrdiff_t total_item_count = 0;
8673 seg_free_space_array = (seg_free_space*)(free_space_buckets + free_space_bucket_count);
8675 for (i = 0; i < (ptrdiff_t)item_count; i++)
8677 seg_free_space_array[i].start = 0;
8678 seg_free_space_array[i].is_plug = FALSE;
8681 for (i = 0; i < bucket_count; i++)
8683 free_space_buckets[i].count_add = ordered_free_spaces[i];
8684 free_space_buckets[i].count_fit = ordered_free_spaces[i];
8685 free_space_buckets[i].free_space = &seg_free_space_array[total_item_count];
8686 total_item_count += free_space_buckets[i].count_add;
8689 assert (total_item_count == (ptrdiff_t)item_count);
8692 // If we are adding a free space before a plug, we pass the
8693 // mark stack position in so we can update the length; we could
8694 // also be adding the free space after the last plug, in which
8695 // case start is the segment whose heap_segment_plan_allocated
8696 // we'll need to update.
8697 void add (void* start, BOOL plug_p, BOOL first_p)
8699 size_t size = (plug_p ?
8700 pinned_len ((mark*)start) :
8701 (heap_segment_committed ((heap_segment*)start) -
8702 heap_segment_plan_allocated ((heap_segment*)start)));
8706 dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space before plug: %Id", heap_num, size));
8710 dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space at end of seg: %Id", heap_num, size));
8712 has_end_of_seg = TRUE;
8718 size_t eph_gen_starts = gc_heap::eph_gen_starts_size;
8719 size -= eph_gen_starts;
8722 mark* m = (mark*)(start);
8723 pinned_len (m) -= eph_gen_starts;
8727 heap_segment* seg = (heap_segment*)start;
8728 heap_segment_plan_allocated (seg) += eph_gen_starts;
8732 int bucket_power2 = index_of_highest_set_bit (size);
8733 if (bucket_power2 < base_power2)
8738 free_space_bucket* bucket = &free_space_buckets[bucket_power2 - base_power2];
8740 seg_free_space* bucket_free_space = bucket->free_space;
8741 assert (plug_p || (!plug_p && bucket->count_add));
8743 if (bucket->count_add == 0)
8745 dprintf (SEG_REUSE_LOG_1, ("[%d]Already have enough of 2^%d", heap_num, bucket_power2));
8749 ptrdiff_t index = bucket->count_add - 1;
8751 dprintf (SEG_REUSE_LOG_1, ("[%d]Building free spaces: adding %Ix; len: %Id (2^%d)",
8754 (pinned_plug ((mark*)start) - pinned_len ((mark*)start)) :
8755 heap_segment_plan_allocated ((heap_segment*)start)),
8759 if (plug_p)
8761 bucket_free_space[index].is_plug = TRUE;
8764 bucket_free_space[index].start = start;
8765 bucket->count_add--;
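// Example of the bucketing scheme (hypothetical sizes): a 5000-byte free
// space has index_of_highest_set_bit == 12, so it lands in the 2^12 bucket
// (sizes 4096..8191). When fitting, the plug size is rounded up with
// round_up_power2 before picking a bucket, so every entry in the chosen
// bucket is already large enough for the unpadded plug; the per-entry loop
// in fit() then only has to account for alignment padding.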
8770 // Do a consistency check after all free spaces are added.
8774 int end_of_seg_count = 0;
8776 for (i = 0; i < free_space_item_count; i++)
8778 assert (seg_free_space_array[i].start);
8779 if (!(seg_free_space_array[i].is_plug))
8781 end_of_seg_count++;
8785 if (has_end_of_seg)
8787 assert (end_of_seg_count == 1);
8789 else
8791 assert (end_of_seg_count == 0);
8794 for (i = 0; i < free_space_bucket_count; i++)
8796 assert (free_space_buckets[i].count_add == 0);
8802 uint8_t* fit (uint8_t* old_loc,
8804 BOOL set_padding_on_saved_p,
8805 mark* pinned_plug_entry,
8806 #endif //SHORT_PLUGS
8808 REQD_ALIGN_AND_OFFSET_DCL)
8813 assert (!is_plug_padded (old_loc));
8814 #endif //SHORT_PLUGS
8815 assert (!node_realigned (old_loc));
8818 size_t saved_plug_size = plug_size;
8820 #ifdef FEATURE_STRUCTALIGN
8821 // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account
8822 _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false);
8823 #endif // FEATURE_STRUCTALIGN
8824 // TODO: this is also not large alignment ready. We would need to consider alignment when choosing the
8825 // bucket.
8827 size_t plug_size_to_fit = plug_size;
8829 // best fit is only done for gen1 to gen2 and we do not pad in gen2.
8830 int pad_in_front = 0;
8833 plug_size_to_fit += (pad_in_front ? Align(min_obj_size) : 0);
8834 #endif //SHORT_PLUGS
8836 int plug_power2 = index_of_highest_set_bit (round_up_power2 (plug_size_to_fit + Align(min_obj_size)));
8838 uint8_t* new_address = 0;
8840 if (plug_power2 < base_power2)
8842 plug_power2 = base_power2;
8845 int chosen_power2 = plug_power2 - base_power2;
8847 for (i = chosen_power2; i < free_space_bucket_count; i++)
8849 if (free_space_buckets[i].count_fit != 0)
8856 dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting plug len %Id (2^%d) using 2^%d free space",
8860 (chosen_power2 + base_power2)));
8862 assert (i < free_space_bucket_count);
8864 seg_free_space* bucket_free_space = free_space_buckets[chosen_power2].free_space;
8865 ptrdiff_t free_space_count = free_space_buckets[chosen_power2].count_fit;
8866 size_t new_free_space_size = 0;
8867 BOOL can_fit = FALSE;
8870 for (i = 0; i < free_space_count; i++)
8872 size_t free_space_size = 0;
8875 BOOL short_plugs_padding_p = FALSE;
8876 #endif //SHORT_PLUGS
8877 BOOL realign_padding_p = FALSE;
8879 if (bucket_free_space[i].is_plug)
8881 mark* m = (mark*)(bucket_free_space[i].start);
8882 uint8_t* plug_free_space_start = pinned_plug (m) - pinned_len (m);
8885 if ((pad_in_front & USE_PADDING_FRONT) &&
8886 (((plug_free_space_start - pin_allocation_context_start_region (m))==0) ||
8887 ((plug_free_space_start - pin_allocation_context_start_region (m))>=DESIRED_PLUG_LENGTH)))
8889 pad = Align (min_obj_size);
8890 short_plugs_padding_p = TRUE;
8892 #endif //SHORT_PLUGS
8894 if (!((old_loc == 0) || same_large_alignment_p (old_loc, plug_free_space_start+pad)))
8896 pad += switch_alignment_size (pad != 0);
8897 realign_padding_p = TRUE;
8900 plug_size = saved_plug_size + pad;
8902 free_space_size = pinned_len (m);
8903 new_address = pinned_plug (m) - pinned_len (m);
8905 if (free_space_size >= (plug_size + Align (min_obj_size)) ||
8906 free_space_size == plug_size)
8908 new_free_space_size = free_space_size - plug_size;
8909 pinned_len (m) = new_free_space_size;
8910 #ifdef SIMPLE_DPRINTF
8911 dprintf (SEG_REUSE_LOG_0, ("[%d]FP: 0x%Ix->0x%Ix(%Ix)(%Ix), [0x%Ix (2^%d) -> [0x%Ix (2^%d)",
8918 index_of_highest_set_bit (free_space_size),
8919 (pinned_plug (m) - pinned_len (m)),
8920 index_of_highest_set_bit (new_free_space_size)));
8921 #endif //SIMPLE_DPRINTF
8924 if (short_plugs_padding_p)
8926 pin_allocation_context_start_region (m) = plug_free_space_start;
8927 set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
8929 #endif //SHORT_PLUGS
8931 if (realign_padding_p)
8933 set_node_realigned (old_loc);
8941 heap_segment* seg = (heap_segment*)(bucket_free_space[i].start);
8942 free_space_size = heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
8944 if (!((old_loc == 0) || same_large_alignment_p (old_loc, heap_segment_plan_allocated (seg))))
8946 pad = switch_alignment_size (FALSE);
8947 realign_padding_p = TRUE;
8950 plug_size = saved_plug_size + pad;
8952 if (free_space_size >= (plug_size + Align (min_obj_size)) ||
8953 free_space_size == plug_size)
8955 new_address = heap_segment_plan_allocated (seg);
8956 new_free_space_size = free_space_size - plug_size;
8957 heap_segment_plan_allocated (seg) = new_address + plug_size;
8958 #ifdef SIMPLE_DPRINTF
8959 dprintf (SEG_REUSE_LOG_0, ("[%d]FS: 0x%Ix-> 0x%Ix(%Ix) (2^%d) -> 0x%Ix (2^%d)",
8964 index_of_highest_set_bit (free_space_size),
8965 heap_segment_plan_allocated (seg),
8966 index_of_highest_set_bit (new_free_space_size)));
8967 #endif //SIMPLE_DPRINTF
8969 if (realign_padding_p)
8970 set_node_realigned (old_loc);
8984 assert (chosen_power2 == 0);
8994 assert ((chosen_power2 && (i == 0)) ||
8995 ((!chosen_power2) && (i < free_space_count)));
8998 int new_bucket_power2 = index_of_highest_set_bit (new_free_space_size);
9000 if (new_bucket_power2 < base_power2)
9002 new_bucket_power2 = base_power2;
9005 move_bucket (chosen_power2, new_bucket_power2 - base_power2);
9014 if (free_space_buckets)
9016 delete [] free_space_buckets;
9018 if (seg_free_space_array)
9020 delete [] seg_free_space_array;
9026 #define marked(i) header(i)->IsMarked()
9027 #define set_marked(i) header(i)->SetMarked()
9028 #define clear_marked(i) header(i)->ClearMarked()
9029 #define pinned(i) header(i)->IsPinned()
9030 #define set_pinned(i) header(i)->SetPinned()
9031 #define clear_pinned(i) header(i)->GetHeader()->ClrGCBit();
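// An object's size is its method table's base size plus, for types with
// a component size (arrays and strings), the component count times the
// raw component size.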
9033 inline size_t my_get_size (Object* ob)
9035 MethodTable* mT = header(ob)->GetMethodTable();
9036 return (mT->GetBaseSize() +
9037 (mT->HasComponentSize() ?
9038 ((size_t)((CObjectHeader*)ob)->GetNumComponents() * mT->RawGetComponentSize()) : 0));
9041 //#define size(i) header(i)->GetSize()
9042 #define size(i) my_get_size (header(i))
9044 #define contain_pointers(i) header(i)->ContainsPointers()
9045 #ifdef COLLECTIBLE_CLASS
9046 #define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible()
9048 #define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i)
9049 #define is_collectible(i) method_table(i)->Collectible()
9050 #else //COLLECTIBLE_CLASS
9051 #define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
9052 #endif //COLLECTIBLE_CLASS
9054 #if defined (MARK_ARRAY) && defined (BACKGROUND_GC)
9056 void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg)
9058 uint8_t* range_beg = 0;
9059 uint8_t* range_end = 0;
9060 if (bgc_mark_array_range (seg, FALSE, &range_beg, &range_end))
9062 clear_mark_array (range_beg, align_on_mark_word (range_end), FALSE
9063 #ifdef FEATURE_BASICFREEZE
9065 #endif // FEATURE_BASICFREEZE
9070 void gc_heap::clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
9072 if ((start < background_saved_highest_address) &&
9073 (end > background_saved_lowest_address))
9075 start = max (start, background_saved_lowest_address);
9076 end = min (end, background_saved_highest_address);
9078 size_t start_mark_bit = mark_bit_of (start);
9079 size_t end_mark_bit = mark_bit_of (end);
9080 unsigned int startbit = mark_bit_bit (start_mark_bit);
9081 unsigned int endbit = mark_bit_bit (end_mark_bit);
9082 size_t startwrd = mark_bit_word (start_mark_bit);
9083 size_t endwrd = mark_bit_word (end_mark_bit);
9085 dprintf (3, ("Clearing all mark array bits between [%Ix:%Ix-[%Ix:%Ix",
9086 (size_t)start, (size_t)start_mark_bit,
9087 (size_t)end, (size_t)end_mark_bit));
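// As used here, firstwrd masks in only the bits below startbit (they
// precede the range and must survive) and lastwrd masks in only the bits
// at or above endbit (they follow the range). Whole words strictly in
// between are zeroed outright.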
9089 unsigned int firstwrd = lowbits (~0, startbit);
9090 unsigned int lastwrd = highbits (~0, endbit);
9092 if (startwrd == endwrd)
9094 unsigned int wrd = firstwrd | lastwrd;
9095 mark_array[startwrd] &= wrd;
9099 // clear the first mark word.
9102 mark_array[startwrd] &= firstwrd;
9106 for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
9108 mark_array[wrdtmp] = 0;
9111 // clear the last mark word.
9114 mark_array[endwrd] &= lastwrd;
9119 void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
9121 if ((start < background_saved_highest_address) &&
9122 (end > background_saved_lowest_address))
9124 start = max (start, background_saved_lowest_address);
9125 end = min (end, background_saved_highest_address);
9127 clear_batch_mark_array_bits (start, end);
9131 void gc_heap::clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p)
9133 dprintf (3, ("clearing mark array bits by objects for addr [%Ix,[%Ix",
9135 int align_const = get_alignment_constant (!loh_p);
9141 uint8_t* next_o = o + Align (size (o), align_const);
9143 if (background_object_marked (o, TRUE))
9145 dprintf (3, ("%Ix was marked by bgc, is now cleared", o));
9151 #endif //MARK_ARRAY && BACKGROUND_GC
9154 BOOL gc_heap::is_mark_set (uint8_t* o)
9159 #if defined (_MSC_VER) && defined (_TARGET_X86_)
9160 #pragma optimize("y", on) // Small critical routines, don't put in EBP frame
9161 #endif //_MSC_VER && _TARGET_X86_
9163 // return the generation number of an object.
9164 // It is assumed that the object is valid.
9165 //Note that this will return max_generation for a LOH object
9166 int gc_heap::object_gennum (uint8_t* o)
9168 if (in_range_for_segment (o, ephemeral_heap_segment) &&
9169 (o >= generation_allocation_start (generation_of (max_generation-1))))
9171 // in an ephemeral generation.
9172 for ( int i = 0; i < max_generation-1; i++)
9174 if ((o >= generation_allocation_start (generation_of (i))))
9177 return max_generation-1;
9181 return max_generation;
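// Same as object_gennum, but tests against the planned generation start
// addresses (generation_plan_allocation_start) instead of the current
// ones.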
9185 int gc_heap::object_gennum_plan (uint8_t* o)
9187 if (in_range_for_segment (o, ephemeral_heap_segment))
9189 for (int i = 0; i <= max_generation-1; i++)
9191 uint8_t* plan_start = generation_plan_allocation_start (generation_of (i));
9192 if (plan_start && (o >= plan_start))
9198 return max_generation;
9201 #if defined(_MSC_VER) && defined(_TARGET_X86_)
9202 #pragma optimize("", on) // Go back to command line default optimizations
9203 #endif //_MSC_VER && _TARGET_X86_
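// Segment layout: the heap_segment struct itself is overlaid on the
// start of the reserved range; usable memory (heap_segment_mem) begins
// segment_info_size bytes in, and only the initial chunk is committed
// up front - the rest gets committed on demand via grow_heap_segment.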
9205 heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number)
9207 size_t initial_commit = SEGMENT_INITIAL_COMMIT;
9209 //Commit the first page
9210 if (!virtual_commit (new_pages, initial_commit, h_number))
9215 //overlay the heap_segment
9216 heap_segment* new_segment = (heap_segment*)new_pages;
9218 uint8_t* start = new_pages + segment_info_size;
9219 heap_segment_mem (new_segment) = start;
9220 heap_segment_used (new_segment) = start;
9221 heap_segment_reserved (new_segment) = new_pages + size;
9222 heap_segment_committed (new_segment) = new_pages + initial_commit;
9223 init_heap_segment (new_segment);
9224 dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
9228 void gc_heap::init_heap_segment (heap_segment* seg)
9231 heap_segment_next (seg) = 0;
9232 heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
9233 heap_segment_allocated (seg) = heap_segment_mem (seg);
9234 #ifdef BACKGROUND_GC
9235 heap_segment_background_allocated (seg) = 0;
9236 heap_segment_saved_bg_allocated (seg) = 0;
9237 #endif //BACKGROUND_GC
9240 //Releases the segment to the OS.
9241 // this is always called on one thread only so calling seg_table->remove is fine.
9242 void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
9244 if (!heap_segment_loh_p (seg))
9246 //cleanup the brick table back to the empty value
9247 clear_brick_table (heap_segment_mem (seg), heap_segment_reserved (seg));
9250 if (consider_hoarding)
9252 assert ((heap_segment_mem (seg) - (uint8_t*)seg) <= ptrdiff_t(2*OS_PAGE_SIZE));
9253 size_t ss = (size_t) (heap_segment_reserved (seg) - (uint8_t*)seg);
9254 //Don't keep the big ones.
9255 if (ss <= INITIAL_ALLOC)
9257 dprintf (2, ("Hoarding segment %Ix", (size_t)seg));
9258 #ifdef BACKGROUND_GC
9259 // We don't need to clear the decommitted flag because when this segment is used
9260 // for a new segment the flags will be cleared.
9261 if (!heap_segment_decommitted_p (seg))
9262 #endif //BACKGROUND_GC
9264 decommit_heap_segment (seg);
9267 #ifdef SEG_MAPPING_TABLE
9268 seg_mapping_table_remove_segment (seg);
9269 #endif //SEG_MAPPING_TABLE
9271 heap_segment_next (seg) = segment_standby_list;
9272 segment_standby_list = seg;
9279 dprintf (2, ("h%d: del seg: [%Ix, %Ix[",
9280 heap_number, (size_t)seg,
9281 (size_t)(heap_segment_reserved (seg))));
9283 #ifdef BACKGROUND_GC
9284 ::record_changed_seg ((uint8_t*)seg, heap_segment_reserved (seg),
9285 settings.gc_index, current_bgc_state,
9287 decommit_mark_array_by_seg (seg);
9288 #endif //BACKGROUND_GC
9290 #ifdef SEG_MAPPING_TABLE
9291 seg_mapping_table_remove_segment (seg);
9292 #else //SEG_MAPPING_TABLE
9293 seg_table->remove ((uint8_t*)seg);
9294 #endif //SEG_MAPPING_TABLE
9296 release_segment (seg);
9300 //resets the pages beyond allocated size so they won't be swapped out and back in
9302 void gc_heap::reset_heap_segment_pages (heap_segment* seg)
9304 size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
9305 size_t size = (size_t)heap_segment_committed (seg) - page_start;
9307 GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
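// Decommit the committed tail beyond the allocated end, but only when
// it's worth doing: keep max(extra_space, 32 pages) committed past the
// allocated end, and only proceed if at least
// max(extra_space + 2 pages, 100 pages) is committed beyond it.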
9310 void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
9313 uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
9314 size_t size = heap_segment_committed (seg) - page_start;
9315 extra_space = align_on_page (extra_space);
9316 if (size >= max ((extra_space + 2*OS_PAGE_SIZE), 100*OS_PAGE_SIZE))
9318 page_start += max(extra_space, 32*OS_PAGE_SIZE);
9319 size -= max (extra_space, 32*OS_PAGE_SIZE);
9321 virtual_decommit (page_start, size, heap_number);
9322 dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)",
9324 (size_t)(page_start + size),
9326 heap_segment_committed (seg) = page_start;
9327 if (heap_segment_used (seg) > heap_segment_committed (seg))
9329 heap_segment_used (seg) = heap_segment_committed (seg);
9334 //decommit all pages except one or 2
9335 void gc_heap::decommit_heap_segment (heap_segment* seg)
9337 uint8_t* page_start = align_on_page (heap_segment_mem (seg));
9339 dprintf (3, ("Decommitting heap segment %Ix", (size_t)seg));
9341 #ifdef BACKGROUND_GC
9342 page_start += OS_PAGE_SIZE;
9343 #endif //BACKGROUND_GC
9345 size_t size = heap_segment_committed (seg) - page_start;
9346 virtual_decommit (page_start, size, heap_number);
9348 //re-init the segment object
9349 heap_segment_committed (seg) = page_start;
9350 if (heap_segment_used (seg) > heap_segment_committed (seg))
9352 heap_segment_used (seg) = heap_segment_committed (seg);
9356 void gc_heap::clear_gen0_bricks()
9358 if (!gen0_bricks_cleared)
9360 gen0_bricks_cleared = TRUE;
9361 //initialize brick table for gen 0
9362 for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
9363 b < brick_of (align_on_brick
9364 (heap_segment_allocated (ephemeral_heap_segment)));
9372 #ifdef BACKGROUND_GC
9373 void gc_heap::rearrange_small_heap_segments()
9375 heap_segment* seg = freeable_small_heap_segment;
9378 heap_segment* next_seg = heap_segment_next (seg);
9379 // TODO: we need to consider hoarding here.
9380 delete_heap_segment (seg, FALSE);
9383 freeable_small_heap_segment = 0;
9385 #endif //BACKGROUND_GC
9387 void gc_heap::rearrange_large_heap_segments()
9389 dprintf (2, ("deleting empty large segments"));
9390 heap_segment* seg = freeable_large_heap_segment;
9393 heap_segment* next_seg = heap_segment_next (seg);
9394 delete_heap_segment (seg, GCConfig::GetRetainVM());
9397 freeable_large_heap_segment = 0;
9400 void gc_heap::rearrange_heap_segments(BOOL compacting)
9403 generation_start_segment (generation_of (max_generation));
9405 heap_segment* prev_seg = 0;
9406 heap_segment* next_seg = 0;
9409 next_seg = heap_segment_next (seg);
9411 //link ephemeral segment when expanding
9412 if ((next_seg == 0) && (seg != ephemeral_heap_segment))
9414 seg->next = ephemeral_heap_segment;
9415 next_seg = heap_segment_next (seg);
9418 //handle a re-used expanded heap segment
9419 if ((seg == ephemeral_heap_segment) && next_seg)
9421 heap_segment_next (prev_seg) = next_seg;
9422 heap_segment_next (seg) = 0;
9426 uint8_t* end_segment = (compacting ?
9427 heap_segment_plan_allocated (seg) :
9428 heap_segment_allocated (seg));
9429 // check if the segment was reached by allocation
9430 if ((end_segment == heap_segment_mem (seg)) &&
9431 !heap_segment_read_only_p (seg))
9433 //if not, unthread and delete
9435 assert (seg != ephemeral_heap_segment);
9436 heap_segment_next (prev_seg) = next_seg;
9437 delete_heap_segment (seg, GCConfig::GetRetainVM());
9439 dprintf (2, ("Deleting heap segment %Ix", (size_t)seg));
9443 if (!heap_segment_read_only_p (seg))
9447 heap_segment_allocated (seg) =
9448 heap_segment_plan_allocated (seg);
9451 // reset the pages between allocated and committed.
9452 if (seg != ephemeral_heap_segment)
9454 decommit_heap_segment_pages (seg, 0);
9468 uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch
9470 #ifdef TIME_WRITE_WATCH
9471 static unsigned int tot_cycles = 0;
9472 #endif //TIME_WRITE_WATCH
9476 inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word)
9479 for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++)
9481 if (!card_bundle_set_p (x))
9483 assert (!"Card bundle not set");
9484 dprintf (3, ("Card bundle %Ix not set", x));
9490 // Verifies that any bundles that are not set represent only cards that are not set.
9491 inline void gc_heap::verify_card_bundles()
9494 size_t lowest_card = card_word (card_of (lowest_address));
9495 size_t highest_card = card_word (card_of (highest_address));
9496 size_t cardb = cardw_card_bundle (lowest_card);
9497 size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card));
9499 while (cardb < end_cardb)
9501 uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)];
9502 uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)];
9504 if (card_bundle_set_p (cardb) == 0)
9506 // Verify that no card is set
9507 while (card_word < card_word_end)
9509 if (*card_word != 0)
9511 dprintf (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear",
9512 dd_collection_count (dynamic_data_of (0)),
9513 (size_t)(card_word-&card_table[0]),
9514 (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb));
9517 assert((*card_word)==0);
9527 // If card bundles are enabled, use write watch to find pages in the card table that have
9528 // been dirtied, and set the corresponding card bundle bits.
9529 void gc_heap::update_card_table_bundle()
9531 if (card_bundles_enabled())
9533 // The address of the card word containing the card representing the lowest heap address
9534 uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
9536 // The address of the card word containing the card representing the highest heap address
9537 uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
9539 uint8_t* saved_base_address = base_address;
9540 uintptr_t bcount = array_size;
9541 size_t saved_region_size = align_on_page (high_address) - saved_base_address;
9545 size_t region_size = align_on_page (high_address) - base_address;
9547 dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
9548 bool success = GCToOSInterface::GetWriteWatch(false /* resetState */,
9551 (void**)g_addresses,
9553 assert (success && "GetWriteWatch failed!");
9555 dprintf (3,("Found %d pages written", bcount));
9556 for (unsigned i = 0; i < bcount; i++)
9558 // Offset of the dirty page from the start of the card table (clamped to base_address)
9559 size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
9561 // Offset of the end of the page from the start of the card table (clamped to high addr)
9562 size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
9563 assert (bcardw >= card_word (card_of (g_gc_lowest_address)));
9565 // Set the card bundle bits representing the dirty card table page
9566 card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)));
9567 dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))));
9569 verify_card_bundle_bits_set(bcardw, ecardw);
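// If GetWriteWatch filled the whole buffer there may be more dirty
// pages; continue the scan from the page after the last one reported.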
9572 if (bcount >= array_size)
9574 base_address = g_addresses [array_size-1] + OS_PAGE_SIZE;
9575 bcount = array_size;
9578 } while ((bcount >= array_size) && (base_address < high_address));
9580 // Now that we've updated the card bundle bits, reset the write-tracking state.
9581 GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
9584 #endif //CARD_BUNDLE
9587 void gc_heap::reset_write_watch_for_gc_heap(void* base_address, size_t region_size)
9589 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9590 SoftwareWriteWatch::ClearDirty(base_address, region_size);
9591 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9592 GCToOSInterface::ResetWriteWatch(base_address, region_size);
9593 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9597 void gc_heap::get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended)
9599 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9600 SoftwareWriteWatch::GetDirty(base_address, region_size, dirty_pages, dirty_page_count_ref, reset, is_runtime_suspended);
9601 #else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9602 UNREFERENCED_PARAMETER(is_runtime_suspended);
9603 bool success = GCToOSInterface::GetWriteWatch(reset, base_address, region_size, dirty_pages, dirty_page_count_ref);
9605 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9608 const size_t ww_reset_quantum = 128*1024*1024;
9611 void gc_heap::switch_one_quantum()
9613 enable_preemptive ();
9614 GCToOSInterface::Sleep (1);
9615 disable_preemptive (true);
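// Reset write watch in ww_reset_quantum-sized chunks, yielding between
// chunks (via switch_one_quantum) so a large concurrent reset doesn't
// monopolize the CPU.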
9618 void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size)
9620 size_t reset_size = 0;
9621 size_t remaining_reset_size = 0;
9622 size_t next_reset_size = 0;
9624 while (reset_size != total_reset_size)
9626 remaining_reset_size = total_reset_size - reset_size;
9627 next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size);
9628 if (next_reset_size)
9630 reset_write_watch_for_gc_heap(start_address, next_reset_size);
9631 reset_size += next_reset_size;
9633 switch_one_quantum();
9637 assert (reset_size == total_reset_size);
9640 // This does a Sleep(1) for every ww_reset_quantum bytes of write watch
9641 // we reset concurrently.
9642 void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size)
9646 *current_total_reset_size += last_reset_size;
9648 dprintf (2, ("reset %Id bytes so far", *current_total_reset_size));
9650 if (*current_total_reset_size > ww_reset_quantum)
9652 switch_one_quantum();
9654 *current_total_reset_size = 0;
9659 void gc_heap::reset_write_watch (BOOL concurrent_p)
9661 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9662 // Software write watch currently requires the runtime to be suspended during reset. See SoftwareWriteWatch::ClearDirty().
9663 assert(!concurrent_p);
9664 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
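// Two passes over the heap: first the small object heap segments, then
// the large object heap, in both cases clamping each segment's range to
// [background_saved_lowest_address, background_saved_highest_address).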
9666 heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
9668 PREFIX_ASSUME(seg != NULL);
9670 size_t reset_size = 0;
9671 size_t region_size = 0;
9673 dprintf (2, ("bgc lowest: %Ix, bgc highest: %Ix", background_saved_lowest_address, background_saved_highest_address));
9677 uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
9678 base_address = max (base_address, background_saved_lowest_address);
9680 uint8_t* high_address = 0;
9681 high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
9682 high_address = min (high_address, background_saved_highest_address);
9684 if (base_address < high_address)
9686 region_size = high_address - base_address;
9688 #ifdef TIME_WRITE_WATCH
9689 unsigned int time_start = GetCycleCount32();
9690 #endif //TIME_WRITE_WATCH
9691 dprintf (3, ("h%d: soh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
9692 //reset_ww_by_chunk (base_address, region_size);
9693 reset_write_watch_for_gc_heap(base_address, region_size);
9695 #ifdef TIME_WRITE_WATCH
9696 unsigned int time_stop = GetCycleCount32();
9697 tot_cycles += time_stop - time_start;
9698 printf ("ResetWriteWatch Duration: %d, total: %d\n",
9699 time_stop - time_start, tot_cycles);
9700 #endif //TIME_WRITE_WATCH
9702 switch_on_reset (concurrent_p, &reset_size, region_size);
9705 seg = heap_segment_next_rw (seg);
9707 concurrent_print_time_delta ("CRWW soh");
9710 //concurrent_print_time_delta ("CRW soh");
9712 seg = heap_segment_rw (generation_start_segment (large_object_generation));
9714 PREFIX_ASSUME(seg != NULL);
9718 uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
9719 uint8_t* high_address = heap_segment_allocated (seg);
9721 base_address = max (base_address, background_saved_lowest_address);
9722 high_address = min (high_address, background_saved_highest_address);
9724 if (base_address < high_address)
9726 region_size = high_address - base_address;
9728 #ifdef TIME_WRITE_WATCH
9729 unsigned int time_start = GetCycleCount32();
9730 #endif //TIME_WRITE_WATCH
9731 dprintf (3, ("h%d: loh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
9732 //reset_ww_by_chunk (base_address, region_size);
9733 reset_write_watch_for_gc_heap(base_address, region_size);
9735 #ifdef TIME_WRITE_WATCH
9736 unsigned int time_stop = GetCycleCount32();
9737 tot_cycles += time_stop - time_start;
9738 printf ("ResetWriteWatch Duration: %d, total: %d\n",
9739 time_stop - time_start, tot_cycles);
9740 #endif //TIME_WRITE_WATCH
9742 switch_on_reset (concurrent_p, &reset_size, region_size);
9745 seg = heap_segment_next_rw (seg);
9747 concurrent_print_time_delta ("CRWW loh");
9750 #ifdef DEBUG_WRITE_WATCH
9751 debug_write_watch = (uint8_t**)~0;
9752 #endif //DEBUG_WRITE_WATCH
9755 #endif //WRITE_WATCH
9757 #ifdef BACKGROUND_GC
9758 void gc_heap::restart_vm()
9760 //assert (generation_allocation_pointer (youngest_generation) == 0);
9761 dprintf (3, ("Restarting EE"));
9762 STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n");
9763 ee_proceed_event.Set();
9767 void fire_alloc_wait_event (alloc_wait_reason awr, BOOL begin_p)
9769 if (awr != awr_ignored)
9773 FIRE_EVENT(BGCAllocWaitBegin, awr);
9777 FIRE_EVENT(BGCAllocWaitEnd, awr);
9783 void gc_heap::fire_alloc_wait_event_begin (alloc_wait_reason awr)
9785 fire_alloc_wait_event (awr, TRUE);
9789 void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr)
9791 fire_alloc_wait_event (awr, FALSE);
9793 #endif //BACKGROUND_GC
9794 void gc_heap::make_generation (generation& gen, heap_segment* seg, uint8_t* start, uint8_t* pointer)
9796 gen.allocation_start = start;
9797 gen.allocation_context.alloc_ptr = pointer;
9798 gen.allocation_context.alloc_limit = pointer;
9799 gen.allocation_context.alloc_bytes = 0;
9800 gen.allocation_context.alloc_bytes_loh = 0;
9801 gen.allocation_context_start_region = pointer;
9802 gen.start_segment = seg;
9803 gen.allocation_segment = seg;
9804 gen.plan_allocation_start = 0;
9805 gen.free_list_space = 0;
9806 gen.pinned_allocated = 0;
9807 gen.free_list_allocated = 0;
9808 gen.end_seg_allocated = 0;
9809 gen.condemned_allocated = 0;
9810 gen.free_obj_space = 0;
9811 gen.allocation_size = 0;
9812 gen.pinned_allocation_sweep_size = 0;
9813 gen.pinned_allocation_compact_size = 0;
9814 gen.allocate_end_seg_p = FALSE;
9815 gen.free_list_allocator.clear();
9817 #ifdef FREE_USAGE_STATS
9818 memset (gen.gen_free_spaces, 0, sizeof (gen.gen_free_spaces));
9819 memset (gen.gen_current_pinned_free_spaces, 0, sizeof (gen.gen_current_pinned_free_spaces));
9820 memset (gen.gen_plugs, 0, sizeof (gen.gen_plugs));
9821 #endif //FREE_USAGE_STATS
9824 void gc_heap::adjust_ephemeral_limits ()
9826 ephemeral_low = generation_allocation_start (generation_of (max_generation - 1));
9827 ephemeral_high = heap_segment_reserved (ephemeral_heap_segment);
9829 dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix",
9830 (size_t)ephemeral_low, (size_t)ephemeral_high));
9832 #ifndef MULTIPLE_HEAPS
9833 // This updates the write barrier helpers with the new info.
9834 stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high);
9835 #endif // MULTIPLE_HEAPS
9838 #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
9839 FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config)
9843 if (!temp_logfile_name.Get())
9848 char logfile_name[MAX_LONGPATH+1];
9849 uint32_t pid = GCToOSInterface::GetCurrentProcessId();
9850 const char* suffix = is_config ? ".config.log" : ".log";
9851 _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix);
9852 logFile = fopen(logfile_name, "wb");
9855 #endif //TRACE_GC || GC_CONFIG_DRIVEN
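// Worked example (hypothetical numbers, assuming a 16MB
// min_segment_size_hard_limit): a 1GB hard limit stays 1GB after
// alignment; with 4 heaps, seg_size is 256MB and round_up_power2 leaves
// it at 256MB. A segment size from config can only raise the result,
// never lower it, because we take the max below.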
9857 size_t gc_heap::get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps)
9859 assert (heap_hard_limit);
9860 size_t aligned_hard_limit = ((heap_hard_limit + min_segment_size_hard_limit - 1) & ~(min_segment_size_hard_limit - 1));
9861 if (should_adjust_num_heaps)
9863 uint32_t max_num_heaps = (uint32_t)(aligned_hard_limit / min_segment_size_hard_limit);
9864 if (*num_heaps > max_num_heaps)
9866 *num_heaps = max_num_heaps;
9870 size_t seg_size = aligned_hard_limit / *num_heaps;
9871 size_t aligned_seg_size = round_up_power2 (seg_size);
9873 assert (g_theGCHeap->IsValidSegmentSize (aligned_seg_size));
9875 size_t seg_size_from_config = (size_t)GCConfig::GetSegmentSize();
9876 if (seg_size_from_config)
9878 size_t aligned_seg_size_config = round_up_power2 (seg_size_from_config);
9880 aligned_seg_size = max (aligned_seg_size, aligned_seg_size_config);
9883 //printf ("limit: %Idmb, aligned: %Idmb, %d heaps, seg size from config: %Idmb, seg size %Idmb",
9884 // (heap_hard_limit / 1024 / 1024),
9885 // (aligned_hard_limit / 1024 / 1024),
9887 // (seg_size_from_config / 1024 / 1024),
9888 // (aligned_seg_size / 1024 / 1024));
9889 return aligned_seg_size;
9892 HRESULT gc_heap::initialize_gc (size_t segment_size,
9894 #ifdef MULTIPLE_HEAPS
9895 ,unsigned number_of_heaps
9896 #endif //MULTIPLE_HEAPS
9900 if (GCConfig::GetLogEnabled())
9902 gc_log = CreateLogFile(GCConfig::GetLogFile(), false);
9907 // GCLogFileSize in MBs.
9908 gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize());
9910 if (gc_log_file_size <= 0 || gc_log_file_size > 500)
9916 gc_log_lock.Initialize();
9917 gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size];
9924 memset (gc_log_buffer, '*', gc_log_buffer_size);
9926 max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size;
9930 #ifdef GC_CONFIG_DRIVEN
9931 if (GCConfig::GetConfigLogEnabled())
9933 gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true);
9935 if (gc_config_log == NULL)
9938 gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size];
9939 if (!gc_config_log_buffer)
9941 fclose(gc_config_log);
9945 compact_ratio = static_cast<int>(GCConfig::GetCompactRatio());
9947 // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
9948 cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |",
9952 "C", // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not
9953 "EX", // heap expansion
9955 "BF", // best fit (if it indicates neither NF nor BF it means it had to acquire a new seg.
9958 "PreS", // short object before pinned plug
9959 "PostS", // short object after pinned plug
9960 "Merge", // merged pinned plugs
9961 "Conv", // converted to pinned plug
9962 "Pre", // plug before pinned plug but not after
9963 "Post", // plug after pinned plug but not before
9964 "PrPo", // plug both before and after pinned plug
9965 "PreP", // pre short object padded
9966 "PostP" // post short object padded
9969 #endif //GC_CONFIG_DRIVEN
9972 GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
9973 if (logFileName.Get() != nullptr)
9975 GCStatistics::logFileName = _strdup(logFileName.Get());
9976 GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
9977 if (!GCStatistics::logFile)
9984 HRESULT hres = S_OK;
9987 hardware_write_watch_api_supported();
9988 #ifdef BACKGROUND_GC
9989 if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC())
9991 gc_can_use_concurrent = true;
9992 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9993 virtual_alloc_hardware_write_watch = true;
9994 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
9998 gc_can_use_concurrent = false;
10000 #endif //BACKGROUND_GC
10001 #endif //WRITE_WATCH
10003 #ifdef BACKGROUND_GC
10004 // leave the first page to contain only segment info
10005 // because otherwise we could need to revisit the first page frequently in
10006 // background GC.
10007 segment_info_size = OS_PAGE_SIZE;
10009 segment_info_size = Align (sizeof (heap_segment), get_alignment_constant (FALSE));
10010 #endif //BACKGROUND_GC
10012 reserved_memory = 0;
10013 unsigned block_count;
10014 #ifdef MULTIPLE_HEAPS
10015 reserved_memory_limit = (segment_size + heap_size) * number_of_heaps;
10016 block_count = number_of_heaps;
10017 #else //MULTIPLE_HEAPS
10018 reserved_memory_limit = segment_size + heap_size;
10020 #endif //MULTIPLE_HEAPS
10022 if (heap_hard_limit)
10024 check_commit_cs.Initialize();
10027 if (!reserve_initial_memory(segment_size,heap_size,block_count))
10028 return E_OUTOFMEMORY;
10031 //check if we need to turn on card_bundles.
10032 #ifdef MULTIPLE_HEAPS
10033 // use INT64 arithmetic here because of possible overflow on 32-bit platforms
10034 uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*number_of_heaps;
10037 // use INT64 arithmetic here because of possible overflow on 32-bit platforms
10037 uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
10038 #endif //MULTIPLE_HEAPS
10040 if (can_use_write_watch_for_card_table() && reserved_memory >= th)
10042 settings.card_bundles = TRUE;
10046 settings.card_bundles = FALSE;
10048 #endif //CARD_BUNDLE
10050 settings.first_init();
10052 int latency_level_from_config = static_cast<int>(GCConfig::GetLatencyLevel());
10053 if (latency_level_from_config >= latency_level_first && latency_level_from_config <= latency_level_last)
10055 gc_heap::latency_level = static_cast<gc_latency_level>(latency_level_from_config);
10058 init_static_data();
10060 g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address);
10062 if (!g_gc_card_table)
10063 return E_OUTOFMEMORY;
10065 gc_started = FALSE;
10067 #ifdef MULTIPLE_HEAPS
10068 g_heaps = new (nothrow) gc_heap* [number_of_heaps];
10070 return E_OUTOFMEMORY;
10073 #pragma warning(push)
10074 #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
10075 #endif // _PREFAST_
10076 g_promoted = new (nothrow) size_t [number_of_heaps*16];
10077 g_bpromoted = new (nothrow) size_t [number_of_heaps*16];
10079 g_mark_stack_busy = new (nothrow) int[(number_of_heaps+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
10080 #endif //MH_SC_MARK
10082 #pragma warning(pop)
10083 #endif // _PREFAST_
10084 if (!g_promoted || !g_bpromoted)
10085 return E_OUTOFMEMORY;
10088 if (!g_mark_stack_busy)
10089 return E_OUTOFMEMORY;
10090 #endif //MH_SC_MARK
10092 if (!create_thread_support (number_of_heaps))
10093 return E_OUTOFMEMORY;
10095 if (!heap_select::init (number_of_heaps))
10096 return E_OUTOFMEMORY;
10098 #endif //MULTIPLE_HEAPS
10100 #ifdef MULTIPLE_HEAPS
10101 yp_spin_count_unit = 32 * number_of_heaps;
10103 yp_spin_count_unit = 32 * g_num_processors;
10104 #endif //MULTIPLE_HEAPS
10106 #if defined(__linux__)
10107 GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)),
10108 static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)),
10109 static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)),
10110 static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private)));
10111 #endif // __linux__
10113 if (!init_semi_shared())
10121 //Initializes PER_HEAP_ISOLATED data members.
10123 gc_heap::init_semi_shared()
10127 // This is used for heap expansion - it's to fix exactly the start for gen 0
10128 // through (max_generation-1). When we expand the heap we allocate all these
10129 // gen starts at the beginning of the new ephemeral seg.
10130 eph_gen_starts_size = (Align (min_obj_size)) * max_generation;
10133 #ifdef MULTIPLE_HEAPS
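// Heuristic sizing: roughly one mark list entry per 640 bytes of SOH
// segment, clamped to [8192, 150*1024] entries, with the global list
// allocating mark_list_size entries per heap.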
10134 mark_list_size = min (150*1024, max (8192, soh_segment_size/(2*10*32)));
10135 g_mark_list = make_mark_list (mark_list_size*n_heaps);
10137 min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2;
10138 #ifdef PARALLEL_MARK_LIST_SORT
10139 g_mark_list_copy = make_mark_list (mark_list_size*n_heaps);
10140 if (!g_mark_list_copy)
10144 #endif //PARALLEL_MARK_LIST_SORT
10146 #else //MULTIPLE_HEAPS
10148 mark_list_size = max (8192, soh_segment_size/(64*32));
10149 g_mark_list = make_mark_list (mark_list_size);
10151 #endif //MULTIPLE_HEAPS
10153 dprintf (3, ("mark_list_size: %d", mark_list_size));
10161 #if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
10162 if (!seg_mapping_table_init())
10164 #endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
10166 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
10167 seg_table = sorted_table::make_sorted_table();
10171 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
10173 segment_standby_list = 0;
10175 if (!full_gc_approach_event.CreateManualEventNoThrow(FALSE))
10179 if (!full_gc_end_event.CreateManualEventNoThrow(FALSE))
10184 fgn_maxgen_percent = 0;
10185 fgn_loh_percent = 0;
10186 full_gc_approach_event_set = false;
10188 memset (full_gc_counts, 0, sizeof (full_gc_counts));
10191 should_expand_in_full_gc = FALSE;
10193 #ifdef FEATURE_LOH_COMPACTION
10194 loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0;
10195 loh_compaction_mode = loh_compaction_default;
10196 #endif //FEATURE_LOH_COMPACTION
10198 loh_size_threshold = (size_t)GCConfig::GetLOHThreshold();
10199 assert (loh_size_threshold >= LARGE_OBJECT_SIZE);
10201 #ifdef BACKGROUND_GC
10202 memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts));
10203 bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount());
10204 bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin());
10207 int number_bgc_threads = 1;
10208 #ifdef MULTIPLE_HEAPS
10209 number_bgc_threads = n_heaps;
10210 #endif //MULTIPLE_HEAPS
10211 if (!create_bgc_threads_support (number_bgc_threads))
10216 #endif //BACKGROUND_GC
10218 memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
10220 #ifdef GC_CONFIG_DRIVEN
10221 compact_or_sweep_gcs[0] = 0;
10222 compact_or_sweep_gcs[1] = 0;
10223 #endif //GC_CONFIG_DRIVEN
10226 short_plugs_pad_ratio = (double)DESIRED_PLUG_LENGTH / (double)(DESIRED_PLUG_LENGTH - Align (min_obj_size));
10227 #endif //SHORT_PLUGS
10235 if (full_gc_approach_event.IsValid())
10237 full_gc_approach_event.CloseEvent();
10239 if (full_gc_end_event.IsValid())
10241 full_gc_end_event.CloseEvent();
10248 gc_heap* gc_heap::make_gc_heap (
10249 #ifdef MULTIPLE_HEAPS
10252 #endif //MULTIPLE_HEAPS
10257 #ifdef MULTIPLE_HEAPS
10258 res = new (nothrow) gc_heap;
10262 res->vm_heap = vm_hp;
10263 res->alloc_context_count = 0;
10266 #ifdef PARALLEL_MARK_LIST_SORT
10267 res->mark_list_piece_start = new (nothrow) uint8_t**[n_heaps];
10268 if (!res->mark_list_piece_start)
10272 #pragma warning(push)
10273 #pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
10274 #endif // _PREFAST_
10275 res->mark_list_piece_end = new (nothrow) uint8_t**[n_heaps + 32]; // +32 is padding to reduce false sharing
10277 #pragma warning(pop)
10278 #endif // _PREFAST_
10280 if (!res->mark_list_piece_end)
10282 #endif //PARALLEL_MARK_LIST_SORT
10286 #endif //MULTIPLE_HEAPS
10288 if (res->init_gc_heap (
10289 #ifdef MULTIPLE_HEAPS
10291 #else //MULTIPLE_HEAPS
10293 #endif //MULTIPLE_HEAPS
10299 #ifdef MULTIPLE_HEAPS
10302 return (gc_heap*)1;
10303 #endif //MULTIPLE_HEAPS
10307 gc_heap::wait_for_gc_done(int32_t timeOut)
10309 bool cooperative_mode = enable_preemptive ();
10311 uint32_t dwWaitResult = NOERROR;
10313 gc_heap* wait_heap = NULL;
10314 while (gc_heap::gc_started)
10316 #ifdef MULTIPLE_HEAPS
10317 wait_heap = GCHeap::GetHeap(heap_select::select_heap(NULL, 0))->pGenGCHeap;
10318 dprintf(2, ("waiting for the gc_done_event on heap %d", wait_heap->heap_number));
10319 #endif // MULTIPLE_HEAPS
10322 PREFIX_ASSUME(wait_heap != NULL);
10323 #endif // _PREFAST_
10325 dwWaitResult = wait_heap->gc_done_event.Wait(timeOut, FALSE);
10327 disable_preemptive (cooperative_mode);
10329 return dwWaitResult;
10333 gc_heap::set_gc_done()
10335 enter_gc_done_event_lock();
10336 if (!gc_done_event_set)
10338 gc_done_event_set = true;
10339 dprintf (2, ("heap %d: setting gc_done_event", heap_number));
10340 gc_done_event.Set();
10342 exit_gc_done_event_lock();
10346 gc_heap::reset_gc_done()
10348 enter_gc_done_event_lock();
10349 if (gc_done_event_set)
10351 gc_done_event_set = false;
10352 dprintf (2, ("heap %d: resetting gc_done_event", heap_number));
10353 gc_done_event.Reset();
10355 exit_gc_done_event_lock();
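// gc_done_event_lock doubles as a spin lock: -1 means free, >= 0 means
// held. CompareExchange(&gc_done_event_lock, 0, -1) takes it when free;
// a loser spins (YieldProcessor, then yielding the thread) until it
// observes -1 again and can retry.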
10359 gc_heap::enter_gc_done_event_lock()
10361 uint32_t dwSwitchCount = 0;
10364 if (Interlocked::CompareExchange(&gc_done_event_lock, 0, -1) >= 0)
10366 while (gc_done_event_lock >= 0)
10368 if (g_num_processors > 1)
10370 int spin_count = yp_spin_count_unit;
10371 for (int j = 0; j < spin_count; j++)
10373 if (gc_done_event_lock < 0)
10375 YieldProcessor(); // indicate to the processor that we are spinning
10377 if (gc_done_event_lock >= 0)
10378 GCToOSInterface::YieldThread(++dwSwitchCount);
10381 GCToOSInterface::YieldThread(++dwSwitchCount);
10388 gc_heap::exit_gc_done_event_lock()
10390 gc_done_event_lock = -1;
10393 #ifndef MULTIPLE_HEAPS
10395 #ifdef RECORD_LOH_STATE
10396 int gc_heap::loh_state_index = 0;
10397 gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states];
10398 #endif //RECORD_LOH_STATE
10400 VOLATILE(int32_t) gc_heap::gc_done_event_lock;
10401 VOLATILE(bool) gc_heap::gc_done_event_set;
10402 GCEvent gc_heap::gc_done_event;
10403 #endif //!MULTIPLE_HEAPS
10404 VOLATILE(bool) gc_heap::internal_gc_done;
10406 void gc_heap::add_saved_spinlock_info (
10408 msl_enter_state enter_state,
10409 msl_take_state take_state)
10412 #ifdef SPINLOCK_HISTORY
10413 spinlock_info* current = &last_spinlock_info[spinlock_info_index];
10415 current->enter_state = enter_state;
10416 current->take_state = take_state;
10417 current->thread_id.SetToCurrentThread();
10418 current->loh_p = loh_p;
10419 dprintf (SPINLOCK_LOG, ("[%d]%s %s %s",
10421 (loh_p ? "loh" : "soh"),
10422 ((enter_state == me_acquire) ? "E" : "L"),
10423 msl_take_state_str[take_state]));
10425 spinlock_info_index++;
10427 assert (spinlock_info_index <= max_saved_spinlock_info);
10429 if (spinlock_info_index >= max_saved_spinlock_info)
10431 spinlock_info_index = 0;
10434 MAYBE_UNUSED_VAR(enter_state);
10435 MAYBE_UNUSED_VAR(take_state);
10436 #endif //SPINLOCK_HISTORY
10440 gc_heap::init_gc_heap (int h_number)
10442 #ifdef MULTIPLE_HEAPS
10446 allocated_since_last_gc = 0;
10448 #ifdef SPINLOCK_HISTORY
10449 spinlock_info_index = 0;
10450 memset (last_spinlock_info, 0, sizeof(last_spinlock_info));
10451 #endif //SPINLOCK_HISTORY
10453 // initialize per heap members.
10454 ephemeral_low = (uint8_t*)1;
10456 ephemeral_high = MAX_PTR;
10458 ephemeral_heap_segment = 0;
10460 freeable_large_heap_segment = 0;
10462 condemned_generation_num = 0;
10464 blocking_collection = FALSE;
10466 generation_skip_ratio = 100;
10468 mark_stack_tos = 0;
10470 mark_stack_bos = 0;
10472 mark_stack_array_length = 0;
10474 mark_stack_array = 0;
10476 #if defined (_DEBUG) && defined (VERIFY_HEAP)
10477 verify_pinned_queue_p = FALSE;
10478 #endif // _DEBUG && VERIFY_HEAP
10480 loh_pinned_queue_tos = 0;
10482 loh_pinned_queue_bos = 0;
10484 loh_pinned_queue_length = 0;
10486 loh_pinned_queue_decay = LOH_PIN_DECAY;
10488 loh_pinned_queue = 0;
10490 min_overflow_address = MAX_PTR;
10492 max_overflow_address = 0;
10494 gen0_bricks_cleared = FALSE;
10496 gen0_must_clear_bricks = 0;
10498 allocation_quantum = CLR_SIZE;
10500 more_space_lock_soh = gc_lock;
10502 more_space_lock_loh = gc_lock;
10504 ro_segments_in_range = FALSE;
10506 loh_alloc_since_cg = 0;
10508 new_heap_segment = NULL;
10510 gen0_allocated_after_gc_p = false;
10512 #ifdef RECORD_LOH_STATE
10513 loh_state_index = 0;
10514 #endif //RECORD_LOH_STATE
10515 #endif //MULTIPLE_HEAPS
10517 #ifdef MULTIPLE_HEAPS
10518 if (h_number > n_heaps)
10520 assert (!"Number of heaps exceeded");
10524 heap_number = h_number;
10525 #endif //MULTIPLE_HEAPS
10527 memset (&oom_info, 0, sizeof (oom_info));
10528 memset (&fgm_result, 0, sizeof (fgm_result));
10529 if (!gc_done_event.CreateManualEventNoThrow(FALSE))
10533 gc_done_event_lock = -1;
10534 gc_done_event_set = false;
10536 #ifndef SEG_MAPPING_TABLE
10537 if (!gc_heap::seg_table->ensure_space_for_insert ())
10541 #endif //!SEG_MAPPING_TABLE
10543 heap_segment* seg = get_initial_segment (soh_segment_size, h_number);
10547 FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg),
10548 (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
10549 gc_etw_segment_small_object_heap);
10551 #ifdef SEG_MAPPING_TABLE
10552 seg_mapping_table_add_segment (seg, __this);
10553 #else //SEG_MAPPING_TABLE
10554 seg_table->insert ((uint8_t*)seg, sdelta);
10555 #endif //SEG_MAPPING_TABLE
10557 #ifdef MULTIPLE_HEAPS
10558 heap_segment_heap (seg) = this;
10559 #endif //MULTIPLE_HEAPS
10561 /* todo: Need a global lock for this */
10562 uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))];
10563 own_card_table (ct);
10564 card_table = translate_card_table (ct);
10565 /* End of global lock */
10567 brick_table = card_table_brick_table (ct);
10568 highest_address = card_table_highest_address (ct);
10569 lowest_address = card_table_lowest_address (ct);
10572 card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
10573 assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
10574 card_table_card_bundle_table (ct));
10575 #endif //CARD_BUNDLE
10578 if (gc_can_use_concurrent)
10579 mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
10582 #endif //MARK_ARRAY
10584 uint8_t* start = heap_segment_mem (seg);
10586 for (int i = 0; i < 1 + max_generation; i++)
10588 make_generation (generation_table [ (max_generation - i) ],
10590 generation_table [(max_generation - i)].gen_num = max_generation - i;
10591 start += Align (min_obj_size);
10594 heap_segment_allocated (seg) = start;
10595 alloc_allocated = start;
10596 heap_segment_used (seg) = start - plug_skew;
10598 ephemeral_heap_segment = seg;
10600 #ifndef SEG_MAPPING_TABLE
10601 if (!gc_heap::seg_table->ensure_space_for_insert ())
10605 #endif //!SEG_MAPPING_TABLE
10606 //Create the large segment generation
10607 heap_segment* lseg = get_initial_segment(min_loh_segment_size, h_number);
10610 lseg->flags |= heap_segment_flags_loh;
10612 FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(lseg),
10613 (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
10614 gc_etw_segment_large_object_heap);
10616 #ifdef SEG_MAPPING_TABLE
10617 seg_mapping_table_add_segment (lseg, __this);
10618 #else //SEG_MAPPING_TABLE
10619 seg_table->insert ((uint8_t*)lseg, sdelta);
10620 #endif //SEG_MAPPING_TABLE
10622 generation_table [max_generation].free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST, gen2_alloc_list);
10623 //assign the alloc_list for the large generation
10624 generation_table [max_generation+1].free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST, loh_alloc_list);
10625 generation_table [max_generation+1].gen_num = max_generation+1;
10626 make_generation (generation_table [max_generation+1],lseg, heap_segment_mem (lseg), 0);
10627 heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
10628 heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;
10630 for (int gen_num = 0; gen_num <= 1 + max_generation; gen_num++)
10632 generation* gen = generation_of (gen_num);
10633 make_unused_array (generation_allocation_start (gen), Align (min_obj_size));
10636 #ifdef MULTIPLE_HEAPS
10637 heap_segment_heap (lseg) = this;
10639 //initialize the alloc context heap
10640 generation_alloc_context (generation_of (0))->set_alloc_heap(vm_heap);
10642 //initialize the alloc context heap
10643 generation_alloc_context (generation_of (max_generation+1))->set_alloc_heap(vm_heap);
10645 #endif //MULTIPLE_HEAPS
10647 //Do this only once
10648 #ifdef MULTIPLE_HEAPS
10650 #endif //MULTIPLE_HEAPS
10652 #ifndef INTERIOR_POINTERS
10653 //set the brick_table for large objects
10654 //but the default value is cleared
10655 //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
10656 // (uint8_t*)heap_segment_reserved (lseg));
10658 #else //INTERIOR_POINTERS
10660 //Because of the interior pointer business, we have to clear
10661 //the whole brick table
10662 //but the default value is cleared
10663 // clear_brick_table (lowest_address, highest_address);
10664 #endif //INTERIOR_POINTERS
10667 if (!init_dynamic_data())
10672 etw_allocation_running_amount[0] = 0;
10673 etw_allocation_running_amount[1] = 0;
10675 //needs to be done after the dynamic data has been initialized
10676 #ifndef MULTIPLE_HEAPS
10677 allocation_running_amount = dd_min_size (dynamic_data_of (0));
10678 #endif //!MULTIPLE_HEAPS
10680 fgn_last_alloc = dd_min_size (dynamic_data_of (0));
10682 mark* arr = new (nothrow) (mark [MARK_STACK_INITIAL_LENGTH]);
10686 make_mark_stack(arr);
10688 #ifdef BACKGROUND_GC
10689 freeable_small_heap_segment = 0;
10690 gchist_index_per_heap = 0;
10691 uint8_t** b_arr = new (nothrow) (uint8_t* [MARK_STACK_INITIAL_LENGTH]);
10695 make_background_mark_stack (b_arr);
10696 #endif //BACKGROUND_GC
10698 ephemeral_low = generation_allocation_start(generation_of(max_generation - 1));
10699 ephemeral_high = heap_segment_reserved(ephemeral_heap_segment);
10700 if (heap_number == 0)
10702 stomp_write_barrier_initialize(
10703 #ifdef MULTIPLE_HEAPS
10704 reinterpret_cast<uint8_t*>(1), reinterpret_cast<uint8_t*>(~0)
10706 ephemeral_low, ephemeral_high
10707 #endif //!MULTIPLE_HEAPS
10712 // why would we clear the mark array for this page? it should be cleared..
10713 // clear the first committed page
10714 //if(gc_can_use_concurrent)
10716 // clear_mark_array (align_lower_page (heap_segment_mem (seg)), heap_segment_committed (seg));
10718 #endif //MARK_ARRAY
10720 #ifdef MULTIPLE_HEAPS
10721 //register the heap in the heaps array
10723 if (!create_gc_thread ())
10726 g_heaps [heap_number] = this;
10728 #endif //MULTIPLE_HEAPS
10730 #ifdef FEATURE_PREMORTEM_FINALIZATION
10731 HRESULT hr = AllocateCFinalize(&finalize_queue);
10734 #endif // FEATURE_PREMORTEM_FINALIZATION
10736 max_free_space_items = MAX_NUM_FREE_SPACES;
10738 bestfit_seg = new (nothrow) seg_free_spaces (heap_number);
10745 if (!bestfit_seg->alloc())
10750 last_gc_before_oom = FALSE;
10752 sufficient_gen0_space_p = FALSE;
10754 #ifdef MULTIPLE_HEAPS
10756 #ifdef HEAP_ANALYZE
10758 heap_analyze_success = TRUE;
10760 internal_root_array = 0;
10762 internal_root_array_index = 0;
10764 internal_root_array_length = initial_internal_roots;
10768 current_obj_size = 0;
10770 #endif //HEAP_ANALYZE
10772 #endif // MULTIPLE_HEAPS
10774 #ifdef BACKGROUND_GC
10775 bgc_thread_id.Clear();
10777 if (!create_bgc_thread_support())
10782 bgc_alloc_lock = new (nothrow) exclusive_sync;
10783 if (!bgc_alloc_lock)
10788 bgc_alloc_lock->init();
10792 if (!recursive_gc_sync::init())
10796 bgc_thread_running = 0;
10798 bgc_threads_timeout_cs.Initialize();
10799 expanded_in_fgc = 0;
10800 current_bgc_state = bgc_not_in_process;
10801 background_soh_alloc_count = 0;
10802 background_loh_alloc_count = 0;
10803 bgc_overflow_count = 0;
10804 end_loh_size = dd_min_size (dynamic_data_of (max_generation + 1));
10805 #endif //BACKGROUND_GC
10807 #ifdef GC_CONFIG_DRIVEN
10808 memset (interesting_data_per_heap, 0, sizeof (interesting_data_per_heap));
10809 memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap));
10810 memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap));
10811 memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap));
10812 #endif //GC_CONFIG_DRIVEN
10818 gc_heap::destroy_semi_shared()
10820 //TODO: will need to move this to per heap
10821 //#ifdef BACKGROUND_GC
10822 // if (c_mark_list)
10823 // delete c_mark_list;
10824 //#endif //BACKGROUND_GC
10828 delete g_mark_list;
10831 #if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
10832 if (seg_mapping_table)
10833 delete seg_mapping_table;
10834 #endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
10836 #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
10837 //destroy the segment map
10838 seg_table->delete_sorted_table();
10839 #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
10843 gc_heap::self_destroy()
10845 #ifdef BACKGROUND_GC
10847 #endif //BACKGROUND_GC
10849 if (gc_done_event.IsValid())
10851 gc_done_event.CloseEvent();
10854 // destroy every segment.
10855 heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
10857 PREFIX_ASSUME(seg != NULL);
10859 heap_segment* next_seg;
10862 next_seg = heap_segment_next_rw (seg);
10863 delete_heap_segment (seg);
10867 seg = heap_segment_rw (generation_start_segment (generation_of (max_generation+1)));
10869 PREFIX_ASSUME(seg != NULL);
10873 next_seg = heap_segment_next_rw (seg);
10874 delete_heap_segment (seg);
10878 // get rid of the card table
10879 release_card_table (card_table);
10881 // destroy the mark stack
10882 delete mark_stack_array;
10884 #ifdef FEATURE_PREMORTEM_FINALIZATION
10885 if (finalize_queue)
10886 delete finalize_queue;
10887 #endif // FEATURE_PREMORTEM_FINALIZATION
10891 gc_heap::destroy_gc_heap(gc_heap* heap)
10893 heap->self_destroy();
10897 // Destroys resources owned by gc. It is assumed that a last GC has been performed and that
10898 // the finalizer queue has been drained.
10899 void gc_heap::shutdown_gc()
10901 destroy_semi_shared();
10903 #ifdef MULTIPLE_HEAPS
10904 //delete the heaps array
10906 destroy_thread_support();
10908 #endif //MULTIPLE_HEAPS
10909 //destroy seg_manager
10911 destroy_initial_memory();
10913 GCToOSInterface::Shutdown();
10917 BOOL gc_heap::size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
10918 uint8_t* old_loc, int use_padding)
10920 BOOL already_padded = FALSE;
10922 if ((old_loc != 0) && (use_padding & USE_PADDING_FRONT))
10924 alloc_pointer = alloc_pointer + Align (min_obj_size);
10925 already_padded = TRUE;
10927 #endif //SHORT_PLUGS
10929 if (!((old_loc == 0) || same_large_alignment_p (old_loc, alloc_pointer)))
10930 size = size + switch_alignment_size (already_padded);
10932 #ifdef FEATURE_STRUCTALIGN
10933 alloc_pointer = StructAlign(alloc_pointer, requiredAlignment, alignmentOffset);
10934 #endif // FEATURE_STRUCTALIGN
10936 // in allocate_in_condemned_generation we can have this when we
10937 // set the alloc_limit to plan_allocated which could be less than
10938 // the alloc pointer.
10939 if (alloc_limit < alloc_pointer)
10946 return (((size_t)(alloc_limit - alloc_pointer) >= (size + ((use_padding & USE_PADDING_TAIL)? Align(min_obj_size) : 0)))
10948 ||((!(use_padding & USE_PADDING_FRONT)) && ((alloc_pointer + size) == alloc_limit))
10949 #else //SHORT_PLUGS
10950 ||((alloc_pointer + size) == alloc_limit)
10951 #endif //SHORT_PLUGS
10956 assert (size == Align (min_obj_size));
10957 return ((size_t)(alloc_limit - alloc_pointer) >= size);
10962 BOOL gc_heap::a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
10965 // We could have run into cases where this is true when alloc_allocated is
10966 // the same as the seg committed.
10967 if (alloc_limit < alloc_pointer)
10972 return ((size_t)(alloc_limit - alloc_pointer) >= (size + Align(min_obj_size, align_const)));
10975 // Grow by committing more pages
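// Commits are batched: at least commit_min_th bytes at a time, capped at
// what is still reserved in the segment, so many small growth requests
// don't turn into many tiny commit calls.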
10976 BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p)
10978 assert (high_address <= heap_segment_reserved (seg));
10980 if (hard_limit_exceeded_p)
10981 *hard_limit_exceeded_p = false;
10983 //return 0 if we are at the end of the segment.
10984 if (align_on_page (high_address) > heap_segment_reserved (seg))
10987 if (high_address <= heap_segment_committed (seg))
10990 size_t c_size = align_on_page ((size_t)(high_address - heap_segment_committed (seg)));
10991 c_size = max (c_size, commit_min_th);
10992 c_size = min (c_size, (size_t)(heap_segment_reserved (seg) - heap_segment_committed (seg)));
10997 STRESS_LOG2(LF_GC, LL_INFO10000,
10998 "Growing heap_segment: %Ix high address: %Ix\n",
10999 (size_t)seg, (size_t)high_address);
11001 bool ret = virtual_commit (heap_segment_committed (seg), c_size, heap_number, hard_limit_exceeded_p);
11005 #ifndef BACKGROUND_GC
11006 clear_mark_array (heap_segment_committed (seg),
11007 heap_segment_committed (seg)+c_size, TRUE);
11008 #endif //BACKGROUND_GC
11009 #endif //MARK_ARRAY
11010 heap_segment_committed (seg) += c_size;
11012 STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix",
11013 (size_t)heap_segment_committed (seg));
11015 assert (heap_segment_committed (seg) <= heap_segment_reserved (seg));
11016 assert (high_address <= heap_segment_committed (seg));
11023 int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL)
11026 if ((old_loc != 0) && pad_front_p)
11028 allocated = allocated + Align (min_obj_size);
11030 #endif //SHORT_PLUGS
11032 if (!((old_loc == 0) || same_large_alignment_p (old_loc, allocated)))
11033 size = size + switch_alignment_size (FALSE);
11034 #ifdef FEATURE_STRUCTALIGN
11035 size_t pad = ComputeStructAlignPad(allocated, requiredAlignment, alignmentOffset);
11036 return grow_heap_segment (seg, allocated + pad + size);
11037 #else // FEATURE_STRUCTALIGN
11038 return grow_heap_segment (seg, allocated + size);
11039 #endif // FEATURE_STRUCTALIGN
11042 //used only in older generation allocation (i.e. during gc).
11043 void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
11046 UNREFERENCED_PARAMETER(gennum);
11047 dprintf (3, ("gc Expanding segment allocation"));
11048 heap_segment* seg = generation_allocation_segment (gen);
11049 if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
11051 if (generation_allocation_limit (gen) == heap_segment_plan_allocated (seg))
11053 assert (generation_allocation_pointer (gen) >= heap_segment_mem (seg));
11054 assert (generation_allocation_pointer (gen) <= heap_segment_committed (seg));
11055 heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
11059 uint8_t* hole = generation_allocation_pointer (gen);
11060 size_t size = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
11064 dprintf (3, ("filling up hole: %Ix, size %Ix", hole, size));
11065 size_t allocated_size = generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen);
11066 if (size >= Align (min_free_list))
11068 if (allocated_size < min_free_list)
11070 if (size >= (Align (min_free_list) + Align (min_obj_size)))
11072 //split hole into min obj + threadable free item
11073 make_unused_array (hole, min_obj_size);
11074 generation_free_obj_space (gen) += Align (min_obj_size);
11075 make_unused_array (hole + Align (min_obj_size), size - Align (min_obj_size));
11076 generation_free_list_space (gen) += size - Align (min_obj_size);
11077 generation_allocator(gen)->thread_item_front (hole + Align (min_obj_size),
11078 size - Align (min_obj_size));
11079 add_gen_free (gen->gen_num, (size - Align (min_obj_size)));
11083 dprintf (3, ("allocated size too small, can't put back rest on free list %Ix", allocated_size));
11084 make_unused_array (hole, size);
11085 generation_free_obj_space (gen) += size;
11090 dprintf (3, ("threading hole in front of free list"));
11091 make_unused_array (hole, size);
11092 generation_free_list_space (gen) += size;
11093 generation_allocator(gen)->thread_item_front (hole, size);
11094 add_gen_free (gen->gen_num, size);
11099 make_unused_array (hole, size);
11100 generation_free_obj_space (gen) += size;
11104 generation_allocation_pointer (gen) = start;
11105 generation_allocation_context_start_region (gen) = start;
11107 generation_allocation_limit (gen) = (start + limit_size);
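// Hole bookkeeping sketch (sizes assumed): with Align (min_free_list) == 40
// and Align (min_obj_size) == 24, a 100-byte hole left after a 16-byte
// context (allocated_size < min_free_list) becomes a 24-byte gap object plus
// a 76-byte item threaded on the free list; after a big enough context the
// whole 100 bytes are threaded; a 32-byte hole is too small to thread and
// only counts toward generation_free_obj_space.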
11110 void verify_mem_cleared (uint8_t* start, size_t size)
11112 if (!Aligned (size))
11117 PTR_PTR curr_ptr = (PTR_PTR) start;
11118 for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
11120 if (*(curr_ptr++) != 0)
11127 #if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
11128 void gc_heap::set_batch_mark_array_bits (uint8_t* start, uint8_t* end)
11130 size_t start_mark_bit = mark_bit_of (start);
11131 size_t end_mark_bit = mark_bit_of (end);
11132 unsigned int startbit = mark_bit_bit (start_mark_bit);
11133 unsigned int endbit = mark_bit_bit (end_mark_bit);
11134 size_t startwrd = mark_bit_word (start_mark_bit);
11135 size_t endwrd = mark_bit_word (end_mark_bit);
11137 dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix",
11138 (size_t)start, (size_t)start_mark_bit,
11139 (size_t)end, (size_t)end_mark_bit));
11141 unsigned int firstwrd = ~(lowbits (~0, startbit));
11142 unsigned int lastwrd = ~(highbits (~0, endbit));
11144 if (startwrd == endwrd)
11146 unsigned int wrd = firstwrd & lastwrd;
11147 mark_array[startwrd] |= wrd;
11151 // set the first mark word.
11154 mark_array[startwrd] |= firstwrd;
11158 for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
11160 mark_array[wrdtmp] = ~(unsigned int)0;
11163 // set the last mark word.
11166 mark_array[endwrd] |= lastwrd;
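// Mask sketch (illustrative): startbit == 5 gives firstwrd ==
// ~lowbits (~0, 5), i.e. bits 5..31; endbit == 9 gives lastwrd ==
// ~highbits (~0, 9), i.e. bits 0..8; when startwrd == endwrd the two are
// ANDed so exactly bits 5..8 of that one mark word get set.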
11170 // makes sure that the mark array bits between start and end are 0.
11171 void gc_heap::check_batch_mark_array_bits (uint8_t* start, uint8_t* end)
11173 size_t start_mark_bit = mark_bit_of (start);
11174 size_t end_mark_bit = mark_bit_of (end);
11175 unsigned int startbit = mark_bit_bit (start_mark_bit);
11176 unsigned int endbit = mark_bit_bit (end_mark_bit);
11177 size_t startwrd = mark_bit_word (start_mark_bit);
11178 size_t endwrd = mark_bit_word (end_mark_bit);
11180 //dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix",
11181 // (size_t)start, (size_t)start_mark_bit,
11182 // (size_t)end, (size_t)end_mark_bit));
11184 unsigned int firstwrd = ~(lowbits (~0, startbit));
11185 unsigned int lastwrd = ~(highbits (~0, endbit));
11187 if (startwrd == endwrd)
11189 unsigned int wrd = firstwrd & lastwrd;
11190 if (mark_array[startwrd] & wrd)
11192 dprintf (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
11194 mark_array [startwrd], mark_word_address (startwrd)));
11200 // check the first mark word.
11203 if (mark_array[startwrd] & firstwrd)
11205 dprintf (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
11206 firstwrd, startwrd,
11207 mark_array [startwrd], mark_word_address (startwrd)));
11214 for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
11216 if (mark_array[wrdtmp])
11218 dprintf (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
11220 mark_array [wrdtmp], mark_word_address (wrdtmp)));
11225 // check the last mark word.
11228 if (mark_array[endwrd] & lastwrd)
11230 dprintf (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
11232 mark_array [endwrd], mark_word_address (endwrd)));
11237 #endif //VERIFY_HEAP && BACKGROUND_GC
11239 allocator::allocator (unsigned int num_b, size_t fbs, alloc_list* b)
11241 assert (num_b < MAX_BUCKET_COUNT);
11242 num_buckets = num_b;
11243 frst_bucket_size = fbs;
11247 alloc_list& allocator::alloc_list_of (unsigned int bn)
11249 assert (bn < num_buckets);
11251 return first_bucket;
11253 return buckets [bn-1];
11256 size_t& allocator::alloc_list_damage_count_of (unsigned int bn)
11258 assert (bn < num_buckets);
11260 return first_bucket.alloc_list_damage_count();
11262 return buckets [bn-1].alloc_list_damage_count();
11265 void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item, BOOL use_undo_p)
11267 //unlink the free_item
11268 alloc_list* al = &alloc_list_of (bn);
11271 if (use_undo_p && (free_list_undo (prev_item) == UNDO_EMPTY))
11273 assert (item == free_list_slot (prev_item));
11274 free_list_undo (prev_item) = item;
11275 alloc_list_damage_count_of (bn)++;
11277 free_list_slot (prev_item) = free_list_slot(item);
11281 al->alloc_list_head() = (uint8_t*)free_list_slot(item);
11283 if (al->alloc_list_tail() == item)
11285 al->alloc_list_tail() = prev_item;
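// Undo sketch (illustrative): unlinking B from A -> B -> C with use_undo_p
// leaves A -> C but records B in free_list_undo (A) and bumps the bucket's
// damage count; copy_from_alloc_list can then restore A -> B -> C, while
// commit_alloc_list_changes just clears the undo slots.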
11289 void allocator::clear()
11291 for (unsigned int i = 0; i < num_buckets; i++)
11293 alloc_list_head_of (i) = 0;
11294 alloc_list_tail_of (i) = 0;
11298 //always thread to the end.
11299 void allocator::thread_free_item (uint8_t* item, uint8_t*& head, uint8_t*& tail)
11301 free_list_slot (item) = 0;
11302 free_list_undo (item) = UNDO_EMPTY;
11303 assert (item != head);
11309 //TODO: This shouldn't happen anymore - verify that's the case.
11310 //the following is necessary because the last free element
11311 //may have been truncated, and tail isn't updated.
11312 else if (free_list_slot (head) == 0)
11314 free_list_slot (head) = item;
11318 assert (item != tail);
11319 assert (free_list_slot(tail) == 0);
11320 free_list_slot (tail) = item;
11325 void allocator::thread_item (uint8_t* item, size_t size)
11327 size_t sz = frst_bucket_size;
11328 unsigned int a_l_number = 0;
11330 for (; a_l_number < (num_buckets-1); a_l_number++)
11338 alloc_list* al = &alloc_list_of (a_l_number);
11339 thread_free_item (item,
11340 al->alloc_list_head(),
11341 al->alloc_list_tail());
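// Bucket sketch (assuming the elided loop doubles sz per bucket, as
// a_fit_free_list_p does with sz_list): with frst_bucket_size == 256 and
// 4 buckets, a 700-byte item lands in bucket 2 (512 <= 700 < 1024) and
// anything >= 1024 falls through to the last bucket.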
11344 void allocator::thread_item_front (uint8_t* item, size_t size)
11346 //find right free list
11347 size_t sz = frst_bucket_size;
11348 unsigned int a_l_number = 0;
11349 for (; a_l_number < (num_buckets-1); a_l_number++)
11357 alloc_list* al = &alloc_list_of (a_l_number);
11358 free_list_slot (item) = al->alloc_list_head();
11359 free_list_undo (item) = UNDO_EMPTY;
11361 if (al->alloc_list_tail() == 0)
11363 al->alloc_list_tail() = al->alloc_list_head();
11365 al->alloc_list_head() = item;
11366 if (al->alloc_list_tail() == 0)
11368 al->alloc_list_tail() = item;
11372 void allocator::copy_to_alloc_list (alloc_list* toalist)
11374 for (unsigned int i = 0; i < num_buckets; i++)
11376 toalist [i] = alloc_list_of (i);
11377 #ifdef FL_VERIFICATION
11378 uint8_t* free_item = alloc_list_head_of (i);
11383 free_item = free_list_slot (free_item);
11386 toalist[i].item_count = count;
11387 #endif //FL_VERIFICATION
11391 void allocator::copy_from_alloc_list (alloc_list* fromalist)
11393 BOOL repair_list = !discard_if_no_fit_p ();
11394 for (unsigned int i = 0; i < num_buckets; i++)
11396 size_t count = alloc_list_damage_count_of (i);
11397 alloc_list_of (i) = fromalist [i];
11398 assert (alloc_list_damage_count_of (i) == 0);
11402 //repair the list
11403 //new items may have been added during the plan phase
11404 //items may have been unlinked.
11405 uint8_t* free_item = alloc_list_head_of (i);
11406 while (free_item && count)
11408 assert (((CObjectHeader*)free_item)->IsFree());
11409 if ((free_list_undo (free_item) != UNDO_EMPTY))
11412 free_list_slot (free_item) = free_list_undo (free_item);
11413 free_list_undo (free_item) = UNDO_EMPTY;
11416 free_item = free_list_slot (free_item);
11419 #ifdef FL_VERIFICATION
11420 free_item = alloc_list_head_of (i);
11421 size_t item_count = 0;
11425 free_item = free_list_slot (free_item);
11428 assert (item_count == alloc_list_of (i).item_count);
11429 #endif //FL_VERIFICATION
11432 uint8_t* tail_item = alloc_list_tail_of (i);
11433 assert ((tail_item == 0) || (free_list_slot (tail_item) == 0));
11438 void allocator::commit_alloc_list_changes()
11440 BOOL repair_list = !discard_if_no_fit_p ();
11443 for (unsigned int i = 0; i < num_buckets; i++)
11445 //remove the undo info from list.
11446 uint8_t* free_item = alloc_list_head_of (i);
11447 size_t count = alloc_list_damage_count_of (i);
11448 while (free_item && count)
11450 assert (((CObjectHeader*)free_item)->IsFree());
11452 if (free_list_undo (free_item) != UNDO_EMPTY)
11454 free_list_undo (free_item) = UNDO_EMPTY;
11458 free_item = free_list_slot (free_item);
11461 alloc_list_damage_count_of (i) = 0;
11466 void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size,
11467 alloc_context* acontext, heap_segment* seg,
11468 int align_const, int gen_number)
11470 bool loh_p = (gen_number > 0);
11471 GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
11473 size_t aligned_min_obj_size = Align(min_obj_size, align_const);
11477 assert (heap_segment_used (seg) <= heap_segment_committed (seg));
11480 #ifdef MULTIPLE_HEAPS
11481 if (gen_number == 0)
11483 if (!gen0_allocated_after_gc_p)
11485 gen0_allocated_after_gc_p = true;
11488 #endif //MULTIPLE_HEAPS
11490 dprintf (3, ("Expanding segment allocation [%Ix, %Ix[", (size_t)start,
11491 (size_t)start + limit_size - aligned_min_obj_size));
11493 if ((acontext->alloc_limit != start) &&
11494 (acontext->alloc_limit + aligned_min_obj_size) != start)
11496 uint8_t* hole = acontext->alloc_ptr;
11499 size_t size = (acontext->alloc_limit - acontext->alloc_ptr);
11500 dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + size + Align (min_obj_size, align_const)));
11501 // when we are finishing an allocation from a free list
11502 // we know that the free area was Align(min_obj_size) larger
11503 acontext->alloc_bytes -= size;
11504 size_t free_obj_size = size + aligned_min_obj_size;
11505 make_unused_array (hole, free_obj_size);
11506 generation_free_obj_space (generation_of (gen_number)) += free_obj_size;
11508 acontext->alloc_ptr = start;
11512 if (gen_number == 0)
11514 size_t pad_size = Align (min_obj_size, align_const);
11515 make_unused_array (acontext->alloc_ptr, pad_size);
11516 dprintf (3, ("contigous ac: making min obj gap %Ix->%Ix(%Id)",
11517 acontext->alloc_ptr, (acontext->alloc_ptr + pad_size), pad_size));
11518 acontext->alloc_ptr += pad_size;
11521 acontext->alloc_limit = (start + limit_size - aligned_min_obj_size);
11522 acontext->alloc_bytes += limit_size - ((gen_number < max_generation + 1) ? aligned_min_obj_size : 0);
11524 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
11525 if (g_fEnableAppDomainMonitoring)
11527 GCToEEInterface::RecordAllocatedBytesForHeap(limit_size, heap_number);
11529 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
11531 uint8_t* saved_used = 0;
11535 saved_used = heap_segment_used (seg);
11538 if (seg == ephemeral_heap_segment)
11540 //Sometimes the allocated size is advanced without clearing the
11541 //memory. Let's catch up here
11542 if (heap_segment_used (seg) < (alloc_allocated - plug_skew))
11545 #ifndef BACKGROUND_GC
11546 clear_mark_array (heap_segment_used (seg) + plug_skew, alloc_allocated);
11547 #endif //BACKGROUND_GC
11548 #endif //MARK_ARRAY
11549 heap_segment_used (seg) = alloc_allocated - plug_skew;
11552 #ifdef BACKGROUND_GC
11555 uint8_t* old_allocated = heap_segment_allocated (seg) - plug_skew - limit_size;
11556 #ifdef FEATURE_LOH_COMPACTION
11557 old_allocated -= Align (loh_padding_obj_size, align_const);
11558 #endif //FEATURE_LOH_COMPACTION
11560 assert (heap_segment_used (seg) >= old_allocated);
11562 #endif //BACKGROUND_GC
11564 (start - plug_skew + limit_size) <= heap_segment_used (seg))
11566 add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
11567 leave_spin_lock (msl);
11568 dprintf (3, ("clearing memory at %Ix for %d bytes", (start - plug_skew), limit_size));
11569 memclr (start - plug_skew, limit_size);
11573 uint8_t* used = heap_segment_used (seg);
11574 heap_segment_used (seg) = start + limit_size - plug_skew;
11576 add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
11577 leave_spin_lock (msl);
11579 if ((start - plug_skew) < used)
11581 if (used != saved_used)
11586 dprintf (2, ("clearing memory before used at %Ix for %Id bytes",
11587 (start - plug_skew), (plug_skew + used - start)));
11588 memclr (start - plug_skew, used - (start - plug_skew));
11592 //this portion can be done after we release the lock
11593 if (seg == ephemeral_heap_segment)
11595 #ifdef FFIND_OBJECT
11596 if (gen0_must_clear_bricks > 0)
11598 //set the brick table to speed up find_object
11599 size_t b = brick_of (acontext->alloc_ptr);
11600 set_brick (b, acontext->alloc_ptr - brick_address (b));
11602 dprintf (3, ("Allocation Clearing bricks [%Ix, %Ix[",
11603 b, brick_of (align_on_brick (start + limit_size))));
11604 volatile short* x = &brick_table [b];
11605 short* end_x = &brick_table [brick_of (align_on_brick (start + limit_size))];
11607 for (;x < end_x;x++)
11611 #endif //FFIND_OBJECT
11613 gen0_bricks_cleared = FALSE;
11617 // verifying the memory is completely cleared.
11618 //verify_mem_cleared (start - plug_skew, limit_size);
11621 size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int gen_number)
11623 dynamic_data* dd = dynamic_data_of (gen_number);
11624 ptrdiff_t new_alloc = dd_new_allocation (dd);
11625 assert (new_alloc == (ptrdiff_t)Align (new_alloc,
11626 get_alignment_constant (!(gen_number == (max_generation+1)))));
11628 ptrdiff_t logical_limit = max (new_alloc, (ptrdiff_t)size);
11629 size_t limit = min (logical_limit, (ptrdiff_t)physical_limit);
11630 assert (limit == Align (limit, get_alignment_constant (!(gen_number == (max_generation+1)))));
11631 dd_new_allocation (dd) = (new_alloc - limit);
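// Worked example (illustrative): budget new_alloc == 32K, request size == 2K,
// physical_limit == 8K -> logical_limit = max (32K, 2K) = 32K and
// limit = min (32K, 8K) = 8K, leaving 24K in dd_new_allocation.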
11635 size_t gc_heap::limit_from_size (size_t size, size_t physical_limit, int gen_number,
11638 size_t padded_size = size + Align (min_obj_size, align_const);
11639 // for LOH this is not true...we could select a physical_limit that's exactly the same as size.
11641 assert ((gen_number != 0) || (physical_limit >= padded_size));
11642 size_t min_size_to_allocate = ((gen_number == 0) ? allocation_quantum : 0);
11644 // For SOH if the size asked for is very small, we want to allocate more than
11645 // just what's asked for if possible.
11646 size_t desired_size_to_allocate = max (padded_size, min_size_to_allocate);
11647 size_t new_physical_limit = min (physical_limit, desired_size_to_allocate);
11649 size_t new_limit = new_allocation_limit (padded_size,
11650 new_physical_limit,
11652 assert (new_limit >= (size + Align (min_obj_size, align_const)));
11653 dprintf (100, ("requested to allocate %Id bytes, actual size is %Id", size, new_limit));
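// E.g. (illustrative): a 100-byte gen0 request pads to
// 100 + Align (min_obj_size) but, via desired_size_to_allocate ==
// max (padded_size, allocation_quantum), still asks for a whole quantum when
// the budget and physical_limit allow, so tiny allocations don't take the
// slow path on every object.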
11657 void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
11658 uint8_t* allocated, uint8_t* reserved)
11660 UNREFERENCED_PARAMETER(heap_num);
11662 if (reason == oom_budget)
11664 alloc_size = dd_min_size (dynamic_data_of (0)) / 2;
11667 if ((reason == oom_budget) && ((!fgm_result.loh_p) && (fgm_result.fgm != fgm_no_failure)))
11669 // This means during the last GC we needed to reserve and/or commit more memory
11670 // but we couldn't. We proceeded with the GC and ended up not having enough
11671 // memory at the end. This is a legitimate OOM situation. Otherwise we
11672 // probably made a mistake and didn't expand the heap when we should have.
11673 reason = oom_low_mem;
11676 oom_info.reason = reason;
11677 oom_info.allocated = allocated;
11678 oom_info.reserved = reserved;
11679 oom_info.alloc_size = alloc_size;
11680 oom_info.gc_index = settings.gc_index;
11681 oom_info.fgm = fgm_result.fgm;
11682 oom_info.size = fgm_result.size;
11683 oom_info.available_pagefile_mb = fgm_result.available_pagefile_mb;
11684 oom_info.loh_p = fgm_result.loh_p;
11686 fgm_result.fgm = fgm_no_failure;
11688 // Break early - before the more_space_lock is released so no other threads
11689 // could have allocated on the same heap when OOM happened.
11690 if (GCConfig::GetBreakOnOOM())
11692 GCToOSInterface::DebugBreak();
11696 #ifdef BACKGROUND_GC
11697 BOOL gc_heap::background_allowed_p()
11699 return ( gc_can_use_concurrent && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)) );
11701 #endif //BACKGROUND_GC
11703 void gc_heap::check_for_full_gc (int gen_num, size_t size)
11705 BOOL should_notify = FALSE;
11706 // TRUE if we detect the approaching full GC because of the allocation budget;
11707 // FALSE if it's due to other factors.
11708 BOOL alloc_factor = TRUE;
11711 int n_initial = gen_num;
11712 BOOL local_blocking_collection = FALSE;
11713 BOOL local_elevation_requested = FALSE;
11714 int new_alloc_remain_percent = 0;
11716 if (full_gc_approach_event_set)
11721 if (gen_num != (max_generation + 1))
11723 gen_num = max_generation;
11726 dynamic_data* dd_full = dynamic_data_of (gen_num);
11727 ptrdiff_t new_alloc_remain = 0;
11728 uint32_t pct = ((gen_num == (max_generation + 1)) ? fgn_loh_percent : fgn_maxgen_percent);
11730 for (int gen_index = 0; gen_index <= (max_generation + 1); gen_index++)
11732 dprintf (2, ("FGN: h#%d: gen%d: %Id(%Id)",
11733 heap_number, gen_index,
11734 dd_new_allocation (dynamic_data_of (gen_index)),
11735 dd_desired_allocation (dynamic_data_of (gen_index))));
11738 // For small object allocations we only check every fgn_check_quantum bytes.
11739 if (n_initial == 0)
11741 dprintf (2, ("FGN: gen0 last recorded alloc: %Id", fgn_last_alloc));
11742 dynamic_data* dd_0 = dynamic_data_of (n_initial);
11743 if (((fgn_last_alloc - dd_new_allocation (dd_0)) < fgn_check_quantum) &&
11744 (dd_new_allocation (dd_0) >= 0))
11750 fgn_last_alloc = dd_new_allocation (dd_0);
11751 dprintf (2, ("FGN: gen0 last recorded alloc is now: %Id", fgn_last_alloc));
11754 // We don't consider the size that came from soh 'cause it doesn't contribute to the gen2 budget.
11759 for (i = n+1; i <= max_generation; i++)
11761 if (get_new_allocation (i) <= 0)
11763 n = min (i, max_generation);
11769 dprintf (2, ("FGN: h#%d: gen%d budget exceeded", heap_number, n));
11770 if (gen_num == max_generation)
11772 // If it's small object heap we should first see if we will even be looking at gen2 budget
11773 // in the next GC or not. If not we should go directly to checking other factors.
11774 if (n < (max_generation - 1))
11776 goto check_other_factors;
11780 new_alloc_remain = dd_new_allocation (dd_full) - size;
11782 new_alloc_remain_percent = (int)(((float)(new_alloc_remain) / (float)dd_desired_allocation (dd_full)) * 100);
11784 dprintf (2, ("FGN: alloc threshold for gen%d is %d%%, current threshold is %d%%",
11785 gen_num, pct, new_alloc_remain_percent));
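// E.g. (illustrative): fgn_maxgen_percent == 10, a 100MB gen2 budget with
// 8MB left after this allocation -> new_alloc_remain_percent == 8 <= 10, so
// we notify, unless background GC could handle it non-blockingly below.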
11787 if (new_alloc_remain_percent <= (int)pct)
11789 #ifdef BACKGROUND_GC
11790 // If background GC is enabled, we still want to check whether this will
11791 // be a blocking GC or not because we only want to notify when it's a
11792 // blocking full GC.
11793 if (background_allowed_p())
11795 goto check_other_factors;
11797 #endif //BACKGROUND_GC
11799 should_notify = TRUE;
11803 check_other_factors:
11805 dprintf (2, ("FGC: checking other factors"));
11806 n = generation_to_condemn (n,
11807 &local_blocking_collection,
11808 &local_elevation_requested,
11811 if (local_elevation_requested && (n == max_generation))
11813 if (settings.should_lock_elevation)
11815 int local_elevation_locked_count = settings.elevation_locked_count + 1;
11816 if (local_elevation_locked_count != 6)
11818 dprintf (2, ("FGN: lock count is %d - Condemning max_generation-1",
11819 local_elevation_locked_count));
11820 n = max_generation - 1;
11825 dprintf (2, ("FGN: we estimate gen%d will be collected", n));
11827 #ifdef BACKGROUND_GC
11828 // When background GC is enabled it decreases the accuracy of our predictability -
11829 // by the time the GC happens, we may not be under BGC anymore. If we try to
11830 // predict often enough it should be ok.
11831 if ((n == max_generation) &&
11832 (recursive_gc_sync::background_running_p()))
11834 n = max_generation - 1;
11835 dprintf (2, ("FGN: bgc - 1 instead of 2"));
11838 if ((n == max_generation) && !local_blocking_collection)
11840 if (!background_allowed_p())
11842 local_blocking_collection = TRUE;
11845 #endif //BACKGROUND_GC
11847 dprintf (2, ("FGN: we estimate gen%d will be collected: %s",
11849 (local_blocking_collection ? "blocking" : "background")));
11851 if ((n == max_generation) && local_blocking_collection)
11853 alloc_factor = FALSE;
11854 should_notify = TRUE;
11862 dprintf (2, ("FGN: gen%d detecting full GC approaching(%s) (GC#%d) (%Id%% left in gen%d)",
11864 (alloc_factor ? "alloc" : "other"),
11865 dd_collection_count (dynamic_data_of (0)),
11866 new_alloc_remain_percent,
11869 send_full_gc_notification (n_initial, alloc_factor);
11873 void gc_heap::send_full_gc_notification (int gen_num, BOOL due_to_alloc_p)
11875 if (!full_gc_approach_event_set)
11877 assert (full_gc_approach_event.IsValid());
11878 FIRE_EVENT(GCFullNotify_V1, gen_num, due_to_alloc_p);
11880 full_gc_end_event.Reset();
11881 full_gc_approach_event.Set();
11882 full_gc_approach_event_set = true;
11886 wait_full_gc_status gc_heap::full_gc_wait (GCEvent *event, int time_out_ms)
11888 if (fgn_maxgen_percent == 0)
11890 return wait_full_gc_na;
11893 uint32_t wait_result = user_thread_wait(event, FALSE, time_out_ms);
11895 if ((wait_result == WAIT_OBJECT_0) || (wait_result == WAIT_TIMEOUT))
11897 if (fgn_maxgen_percent == 0)
11899 return wait_full_gc_cancelled;
11902 if (wait_result == WAIT_OBJECT_0)
11904 #ifdef BACKGROUND_GC
11905 if (fgn_last_gc_was_concurrent)
11907 fgn_last_gc_was_concurrent = FALSE;
11908 return wait_full_gc_na;
11911 #endif //BACKGROUND_GC
11913 return wait_full_gc_success;
11918 return wait_full_gc_timeout;
11923 return wait_full_gc_failed;
11927 size_t gc_heap::get_full_compact_gc_count()
11929 return full_gc_counts[gc_type_compacting];
11932 // DTREVIEW - we should check this in dt_low_ephemeral_space_p
11935 BOOL gc_heap::short_on_end_of_seg (int gen_number,
11939 UNREFERENCED_PARAMETER(gen_number);
11940 uint8_t* allocated = heap_segment_allocated(seg);
11942 BOOL sufficient_p = sufficient_space_end_seg (allocated,
11943 heap_segment_reserved (seg),
11944 end_space_after_gc(),
11945 tuning_deciding_short_on_seg);
11948 if (sufficient_gen0_space_p)
11950 dprintf (GTC_LOG, ("gen0 has enough free space"));
11953 sufficient_p = sufficient_gen0_space_p;
11956 return !sufficient_p;
11960 #pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
11964 BOOL gc_heap::a_fit_free_list_p (int gen_number,
11966 alloc_context* acontext,
11969 BOOL can_fit = FALSE;
11970 generation* gen = generation_of (gen_number);
11971 allocator* gen_allocator = generation_allocator (gen);
11972 size_t sz_list = gen_allocator->first_bucket_size();
11973 for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
11975 if ((size < sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
11977 uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
11978 uint8_t* prev_free_item = 0;
11980 while (free_list != 0)
11982 dprintf (3, ("considering free list %Ix", (size_t)free_list));
11983 size_t free_list_size = unused_array_size (free_list);
11984 if ((size + Align (min_obj_size, align_const)) <= free_list_size)
11986 dprintf (3, ("Found adequate unused area: [%Ix, size: %Id",
11987 (size_t)free_list, free_list_size));
11989 gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
11990 // We ask for Align (min_obj_size) more
11991 // to make sure that we can insert a free object;
11992 // adjust_limit will set the limit lower.
11993 size_t limit = limit_from_size (size, free_list_size, gen_number, align_const);
11995 uint8_t* remain = (free_list + limit);
11996 size_t remain_size = (free_list_size - limit);
11997 if (remain_size >= Align(min_free_list, align_const))
11999 make_unused_array (remain, remain_size);
12000 gen_allocator->thread_item_front (remain, remain_size);
12001 assert (remain_size >= Align (min_obj_size, align_const));
12005 //absorb the entire free list
12006 limit += remain_size;
12008 generation_free_list_space (gen) -= limit;
12010 adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
12015 else if (gen_allocator->discard_if_no_fit_p())
12017 assert (prev_free_item == 0);
12018 dprintf (3, ("couldn't use this free area, discarding"));
12019 generation_free_obj_space (gen) += free_list_size;
12021 gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
12022 generation_free_list_space (gen) -= free_list_size;
12026 prev_free_item = free_list;
12028 free_list = free_list_slot (free_list);
12031 sz_list = sz_list * 2;
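// Fit sketch (illustrative): for a 600-byte free item and a healthy gen0
// budget, limit_from_size usually returns the whole 600 bytes and the item
// is absorbed; only when the budget clamps the limit lower does a remainder
// >= Align (min_free_list) get re-threaded at the bucket front.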
12038 #ifdef BACKGROUND_GC
12039 void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
12041 alloc_context* acontext,
12047 make_unused_array (alloc_start, size);
12049 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
12050 if (g_fEnableAppDomainMonitoring)
12052 GCToEEInterface::RecordAllocatedBytesForHeap(size, heap_number);
12054 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
12056 size_t size_of_array_base = sizeof(ArrayBase);
12058 bgc_alloc_lock->loh_alloc_done_with_index (lock_index);
12060 // clear memory while not holding the lock.
12061 size_t size_to_skip = size_of_array_base;
12062 size_t size_to_clear = size - size_to_skip - plug_skew;
12063 size_t saved_size_to_clear = size_to_clear;
12066 uint8_t* end = alloc_start + size - plug_skew;
12067 uint8_t* used = heap_segment_used (seg);
12070 if ((alloc_start + size_to_skip) < used)
12072 size_to_clear = used - (alloc_start + size_to_skip);
12078 dprintf (2, ("bgc loh: setting used to %Ix", end));
12079 heap_segment_used (seg) = end;
12082 dprintf (2, ("bgc loh: used: %Ix, alloc: %Ix, end of alloc: %Ix, clear %Id bytes",
12083 used, alloc_start, end, size_to_clear));
12087 dprintf (2, ("bgc loh: [%Ix-[%Ix(%Id)", alloc_start, alloc_start+size, size));
12091 // since we filled in 0xcc for free objects when we verify heap,
12092 // we need to make sure we clear those bytes.
12093 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
12095 if (size_to_clear < saved_size_to_clear)
12097 size_to_clear = saved_size_to_clear;
12100 #endif //VERIFY_HEAP
12102 dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear large obj", heap_number));
12103 add_saved_spinlock_info (true, me_release, mt_clr_large_mem);
12104 leave_spin_lock (&more_space_lock_loh);
12105 memclr (alloc_start + size_to_skip, size_to_clear);
12107 bgc_alloc_lock->loh_alloc_set (alloc_start);
12109 acontext->alloc_ptr = alloc_start;
12110 acontext->alloc_limit = (alloc_start + size - Align (min_obj_size, align_const));
12112 // need to clear the rest of the object before we hand it out.
12113 clear_unused_array(alloc_start, size);
12115 #endif //BACKGROUND_GC
12117 BOOL gc_heap::a_fit_free_list_large_p (size_t size,
12118 alloc_context* acontext,
12121 BOOL can_fit = FALSE;
12122 int gen_number = max_generation + 1;
12123 generation* gen = generation_of (gen_number);
12124 allocator* loh_allocator = generation_allocator (gen);
12126 #ifdef FEATURE_LOH_COMPACTION
12127 size_t loh_pad = Align (loh_padding_obj_size, align_const);
12128 #endif //FEATURE_LOH_COMPACTION
12130 #ifdef BACKGROUND_GC
12132 #endif //BACKGROUND_GC
12133 size_t sz_list = loh_allocator->first_bucket_size();
12134 for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
12136 if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
12138 uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
12139 uint8_t* prev_free_item = 0;
12140 while (free_list != 0)
12142 dprintf (3, ("considering free list %Ix", (size_t)free_list));
12144 size_t free_list_size = unused_array_size(free_list);
12146 #ifdef FEATURE_LOH_COMPACTION
12147 if ((size + loh_pad) <= free_list_size)
12149 if (((size + Align (min_obj_size, align_const)) <= free_list_size)||
12150 (size == free_list_size))
12151 #endif //FEATURE_LOH_COMPACTION
12153 #ifdef BACKGROUND_GC
12154 cookie = bgc_alloc_lock->loh_alloc_set (free_list);
12155 bgc_track_loh_alloc();
12156 #endif //BACKGROUND_GC
12158 //unlink the free_item
12159 loh_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
12161 // Subtract min obj size because limit_from_size adds it. Not needed for LOH.
12162 size_t limit = limit_from_size (size - Align(min_obj_size, align_const), free_list_size,
12163 gen_number, align_const);
12165 #ifdef FEATURE_LOH_COMPACTION
12166 make_unused_array (free_list, loh_pad);
12168 free_list += loh_pad;
12169 free_list_size -= loh_pad;
12170 #endif //FEATURE_LOH_COMPACTION
12172 uint8_t* remain = (free_list + limit);
12173 size_t remain_size = (free_list_size - limit);
12174 if (remain_size != 0)
12176 assert (remain_size >= Align (min_obj_size, align_const));
12177 make_unused_array (remain, remain_size);
12179 if (remain_size >= Align(min_free_list, align_const))
12181 loh_thread_gap_front (remain, remain_size, gen);
12182 assert (remain_size >= Align (min_obj_size, align_const));
12186 generation_free_obj_space (gen) += remain_size;
12188 generation_free_list_space (gen) -= free_list_size;
12189 dprintf (3, ("found fit on loh at %Ix", free_list));
12190 #ifdef BACKGROUND_GC
12193 bgc_loh_alloc_clr (free_list, limit, acontext, align_const, cookie, FALSE, 0);
12196 #endif //BACKGROUND_GC
12198 adjust_limit_clr (free_list, limit, acontext, 0, align_const, gen_number);
12201 //fix the limit to compensate for adjust_limit_clr making it too short
12202 acontext->alloc_limit += Align (min_obj_size, align_const);
12206 prev_free_item = free_list;
12207 free_list = free_list_slot (free_list);
12210 sz_list = sz_list * 2;
12217 #pragma warning(default:4706)
12220 BOOL gc_heap::a_fit_segment_end_p (int gen_number,
12223 alloc_context* acontext,
12225 BOOL* commit_failed_p)
12227 *commit_failed_p = FALSE;
12229 bool hard_limit_short_seg_end_p = false;
12230 #ifdef BACKGROUND_GC
12232 #endif //BACKGROUND_GC
12234 uint8_t*& allocated = ((gen_number == 0) ?
12236 heap_segment_allocated(seg));
12238 size_t pad = Align (min_obj_size, align_const);
12240 #ifdef FEATURE_LOH_COMPACTION
12241 size_t loh_pad = Align (loh_padding_obj_size, align_const);
12242 if (gen_number == (max_generation + 1))
12246 #endif //FEATURE_LOH_COMPACTION
12248 uint8_t* end = heap_segment_committed (seg) - pad;
12250 if (a_size_fit_p (size, allocated, end, align_const))
12252 limit = limit_from_size (size,
12254 gen_number, align_const);
12258 end = heap_segment_reserved (seg) - pad;
12260 if (a_size_fit_p (size, allocated, end, align_const))
12262 limit = limit_from_size (size,
12264 gen_number, align_const);
12266 if (grow_heap_segment (seg, (allocated + limit), &hard_limit_short_seg_end_p))
12272 if (!hard_limit_short_seg_end_p)
12274 dprintf (2, ("can't grow segment, doing a full gc"));
12275 *commit_failed_p = TRUE;
12279 assert (heap_hard_limit);
12288 #ifdef BACKGROUND_GC
12289 if (gen_number != 0)
12291 cookie = bgc_alloc_lock->loh_alloc_set (allocated);
12292 bgc_track_loh_alloc();
12294 #endif //BACKGROUND_GC
12296 uint8_t* old_alloc;
12297 old_alloc = allocated;
12298 #ifdef FEATURE_LOH_COMPACTION
12299 if (gen_number == (max_generation + 1))
12301 make_unused_array (old_alloc, loh_pad);
12302 old_alloc += loh_pad;
12303 allocated += loh_pad;
12306 #endif //FEATURE_LOH_COMPACTION
12308 #if defined (VERIFY_HEAP) && defined (_DEBUG)
12309 ((void**) allocated)[-1] = 0; //clear the sync block
12310 #endif //VERIFY_HEAP && _DEBUG
12311 allocated += limit;
12313 dprintf (3, ("found fit at end of seg: %Ix", old_alloc));
12315 #ifdef BACKGROUND_GC
12318 bgc_loh_alloc_clr (old_alloc, limit, acontext, align_const, cookie, TRUE, seg);
12321 #endif //BACKGROUND_GC
12323 adjust_limit_clr (old_alloc, limit, acontext, seg, align_const, gen_number);
12333 BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
12335 alloc_context* acontext,
12337 BOOL* commit_failed_p,
12340 *commit_failed_p = FALSE;
12341 heap_segment* seg = generation_allocation_segment (generation_of (gen_number));
12342 BOOL can_allocate_p = FALSE;
12346 #ifdef BACKGROUND_GC
12347 if (seg->flags & heap_segment_flags_loh_delete)
12349 dprintf (3, ("h%d skipping seg %Ix to be deleted", heap_number, (size_t)seg));
12352 #endif //BACKGROUND_GC
12354 if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)),
12355 acontext, align_const, commit_failed_p))
12357 acontext->alloc_limit += Align (min_obj_size, align_const);
12358 can_allocate_p = TRUE;
12362 if (*commit_failed_p)
12364 *oom_r = oom_cant_commit;
12369 seg = heap_segment_next_rw (seg);
12372 return can_allocate_p;
12375 #ifdef BACKGROUND_GC
12377 void gc_heap::wait_for_background (alloc_wait_reason awr, bool loh_p)
12379 GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
12381 dprintf (2, ("BGC is already in progress, waiting for it to finish"));
12382 add_saved_spinlock_info (loh_p, me_release, mt_wait_bgc);
12383 leave_spin_lock (msl);
12384 background_gc_wait (awr);
12385 enter_spin_lock (msl);
12386 add_saved_spinlock_info (loh_p, me_acquire, mt_wait_bgc);
12389 void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p)
12391 if (recursive_gc_sync::background_running_p())
12393 uint32_t memory_load;
12394 get_memory_info (&memory_load);
12395 if (memory_load >= m_high_memory_load_th)
12397 dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr));
12398 wait_for_background (awr, loh_p);
12403 #endif //BACKGROUND_GC
12405 // We request to trigger an ephemeral GC but we may get a full compacting GC.
12406 // Return TRUE if that's the case.
12407 BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr)
12409 #ifdef BACKGROUND_GC
12410 wait_for_bgc_high_memory (awr_loh_oos_bgc, false);
12411 #endif //BACKGROUND_GC
12413 BOOL did_full_compact_gc = FALSE;
12415 dprintf (2, ("triggering a gen1 GC"));
12416 size_t last_full_compact_gc_count = get_full_compact_gc_count();
12417 vm_heap->GarbageCollectGeneration(max_generation - 1, gr);
12419 #ifdef MULTIPLE_HEAPS
12420 enter_spin_lock (&more_space_lock_soh);
12421 add_saved_spinlock_info (false, me_acquire, mt_t_eph_gc);
12422 #endif //MULTIPLE_HEAPS
12424 size_t current_full_compact_gc_count = get_full_compact_gc_count();
12426 if (current_full_compact_gc_count > last_full_compact_gc_count)
12428 dprintf (2, ("attempted to trigger an ephemeral GC and got a full compacting GC"));
12429 did_full_compact_gc = TRUE;
12432 return did_full_compact_gc;
12435 BOOL gc_heap::soh_try_fit (int gen_number,
12437 alloc_context* acontext,
12439 BOOL* commit_failed_p,
12440 BOOL* short_seg_end_p)
12442 BOOL can_allocate = TRUE;
12443 if (short_seg_end_p)
12445 *short_seg_end_p = FALSE;
12448 can_allocate = a_fit_free_list_p (gen_number, size, acontext, align_const);
12451 if (short_seg_end_p)
12453 *short_seg_end_p = short_on_end_of_seg (gen_number, ephemeral_heap_segment, align_const);
12455 // If the caller doesn't care, we always try to fit at the end of seg;
12456 // otherwise we would only try if we are actually not short at end of seg.
12457 if (!short_seg_end_p || !(*short_seg_end_p))
12459 can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size,
12460 acontext, align_const, commit_failed_p);
12464 return can_allocate;
12467 allocation_state gc_heap::allocate_small (int gen_number,
12469 alloc_context* acontext,
12472 #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
12473 if (recursive_gc_sync::background_running_p())
12475 background_soh_alloc_count++;
12476 if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0)
12478 add_saved_spinlock_info (false, me_release, mt_alloc_small);
12479 leave_spin_lock (&more_space_lock_soh);
12480 bool cooperative_mode = enable_preemptive();
12481 GCToOSInterface::Sleep (bgc_alloc_spin);
12482 disable_preemptive (cooperative_mode);
12483 enter_spin_lock (&more_space_lock_soh);
12484 add_saved_spinlock_info (false, me_acquire, mt_alloc_small);
12488 //GCToOSInterface::YieldThread (0);
12491 #endif //BACKGROUND_GC && !MULTIPLE_HEAPS
12493 gc_reason gr = reason_oos_soh;
12494 oom_reason oom_r = oom_no_failure;
12496 // No variable values should be "carried over" from one state to the other.
12497 // That's why there are local variables for each state.
12499 allocation_state soh_alloc_state = a_state_start;
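// Typical transitions of the switch below: start -> try_fit ->
// can_allocate on the fast path; otherwise try_fit escalates to
// trigger_ephemeral_gc (or trigger_full_compact_gc on commit failure), the
// try_fit_after_* states re-test after a GC, and cant_allocate is reached
// only with an oom_reason set.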
12501 // If we can get a new seg it means allocation will succeed.
12504 dprintf (3, ("[h%d]soh state is %s", heap_number, allocation_state_str[soh_alloc_state]));
12506 switch (soh_alloc_state)
12508 case a_state_can_allocate:
12509 case a_state_cant_allocate:
12513 case a_state_start:
12515 soh_alloc_state = a_state_try_fit;
12518 case a_state_try_fit:
12520 BOOL commit_failed_p = FALSE;
12521 BOOL can_use_existing_p = FALSE;
12523 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12524 align_const, &commit_failed_p,
12526 soh_alloc_state = (can_use_existing_p ?
12527 a_state_can_allocate :
12529 a_state_trigger_full_compact_gc :
12530 a_state_trigger_ephemeral_gc));
12533 case a_state_try_fit_after_bgc:
12535 BOOL commit_failed_p = FALSE;
12536 BOOL can_use_existing_p = FALSE;
12537 BOOL short_seg_end_p = FALSE;
12539 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12540 align_const, &commit_failed_p,
12542 soh_alloc_state = (can_use_existing_p ?
12543 a_state_can_allocate :
12545 a_state_trigger_2nd_ephemeral_gc :
12546 a_state_trigger_full_compact_gc));
12549 case a_state_try_fit_after_cg:
12551 BOOL commit_failed_p = FALSE;
12552 BOOL can_use_existing_p = FALSE;
12553 BOOL short_seg_end_p = FALSE;
12555 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12556 align_const, &commit_failed_p,
12559 if (can_use_existing_p)
12561 soh_alloc_state = a_state_can_allocate;
12563 #ifdef MULTIPLE_HEAPS
12564 else if (gen0_allocated_after_gc_p)
12566 // some other threads already grabbed the more space lock and allocated
12567 // so we should attempt an ephemeral GC again.
12568 soh_alloc_state = a_state_trigger_ephemeral_gc;
12570 #endif //MULTIPLE_HEAPS
12571 else if (short_seg_end_p)
12573 soh_alloc_state = a_state_cant_allocate;
12574 oom_r = oom_budget;
12578 assert (commit_failed_p);
12579 soh_alloc_state = a_state_cant_allocate;
12580 oom_r = oom_cant_commit;
12584 case a_state_check_and_wait_for_bgc:
12586 BOOL bgc_in_progress_p = FALSE;
12587 BOOL did_full_compacting_gc = FALSE;
12589 bgc_in_progress_p = check_and_wait_for_bgc (awr_gen0_oos_bgc, &did_full_compacting_gc, false);
12590 soh_alloc_state = (did_full_compacting_gc ?
12591 a_state_try_fit_after_cg :
12592 a_state_try_fit_after_bgc);
12595 case a_state_trigger_ephemeral_gc:
12597 BOOL commit_failed_p = FALSE;
12598 BOOL can_use_existing_p = FALSE;
12599 BOOL short_seg_end_p = FALSE;
12600 BOOL bgc_in_progress_p = FALSE;
12601 BOOL did_full_compacting_gc = FALSE;
12603 did_full_compacting_gc = trigger_ephemeral_gc (gr);
12604 if (did_full_compacting_gc)
12606 soh_alloc_state = a_state_try_fit_after_cg;
12610 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12611 align_const, &commit_failed_p,
12613 #ifdef BACKGROUND_GC
12614 bgc_in_progress_p = recursive_gc_sync::background_running_p();
12615 #endif //BACKGROUND_GC
12617 if (can_use_existing_p)
12619 soh_alloc_state = a_state_can_allocate;
12623 if (short_seg_end_p)
12625 if (should_expand_in_full_gc)
12627 dprintf (2, ("gen1 GC wanted to expand!"));
12628 soh_alloc_state = a_state_trigger_full_compact_gc;
12632 soh_alloc_state = (bgc_in_progress_p ?
12633 a_state_check_and_wait_for_bgc :
12634 a_state_trigger_full_compact_gc);
12637 else if (commit_failed_p)
12639 soh_alloc_state = a_state_trigger_full_compact_gc;
12643 #ifdef MULTIPLE_HEAPS
12644 // some other threads already grabbed the more space lock and allocated
12645 // so we should attempt an ephemeral GC again.
12646 assert (gen0_allocated_after_gc_p);
12647 soh_alloc_state = a_state_trigger_ephemeral_gc;
12648 #else //MULTIPLE_HEAPS
12649 assert (!"shouldn't get here");
12650 #endif //MULTIPLE_HEAPS
12656 case a_state_trigger_2nd_ephemeral_gc:
12658 BOOL commit_failed_p = FALSE;
12659 BOOL can_use_existing_p = FALSE;
12660 BOOL short_seg_end_p = FALSE;
12661 BOOL did_full_compacting_gc = FALSE;
12664 did_full_compacting_gc = trigger_ephemeral_gc (gr);
12666 if (did_full_compacting_gc)
12668 soh_alloc_state = a_state_try_fit_after_cg;
12672 can_use_existing_p = soh_try_fit (gen_number, size, acontext,
12673 align_const, &commit_failed_p,
12675 if (short_seg_end_p || commit_failed_p)
12677 soh_alloc_state = a_state_trigger_full_compact_gc;
12681 assert (can_use_existing_p);
12682 soh_alloc_state = a_state_can_allocate;
12687 case a_state_trigger_full_compact_gc:
12689 if (fgn_maxgen_percent)
12691 dprintf (2, ("FGN: SOH doing last GC before we throw OOM"));
12692 send_full_gc_notification (max_generation, FALSE);
12695 BOOL got_full_compacting_gc = FALSE;
12697 got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, false);
12698 soh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
12703 assert (!"Invalid state!");
12710 if (soh_alloc_state == a_state_cant_allocate)
12712 assert (oom_r != oom_no_failure);
12713 handle_oom (heap_number,
12716 heap_segment_allocated (ephemeral_heap_segment),
12717 heap_segment_reserved (ephemeral_heap_segment));
12719 add_saved_spinlock_info (false, me_release, mt_alloc_small_cant);
12720 leave_spin_lock (&more_space_lock_soh);
12723 assert ((soh_alloc_state == a_state_can_allocate) ||
12724 (soh_alloc_state == a_state_cant_allocate) ||
12725 (soh_alloc_state == a_state_retry_allocate));
12727 return soh_alloc_state;
12730 #ifdef BACKGROUND_GC
12732 void gc_heap::bgc_track_loh_alloc()
12734 if (current_c_gc_state == c_gc_state_planning)
12736 Interlocked::Increment (&loh_alloc_thread_count);
12737 dprintf (3, ("h%d: inc lc: %d", heap_number, (int32_t)loh_alloc_thread_count));
12742 void gc_heap::bgc_untrack_loh_alloc()
12744 if (current_c_gc_state == c_gc_state_planning)
12746 Interlocked::Decrement (&loh_alloc_thread_count);
12747 dprintf (3, ("h%d: dec lc: %d", heap_number, (int32_t)loh_alloc_thread_count));
12751 BOOL gc_heap::bgc_loh_should_allocate()
12753 size_t min_gc_size = dd_min_size (dynamic_data_of (max_generation + 1));
12755 if ((bgc_begin_loh_size + bgc_loh_size_increased) < (min_gc_size * 10))
12760 if (((bgc_begin_loh_size / end_loh_size) >= 2) || (bgc_loh_size_increased >= bgc_begin_loh_size))
12762 if ((bgc_begin_loh_size / end_loh_size) > 2)
12764 dprintf (3, ("alloc-ed too much before bgc started"));
12768 dprintf (3, ("alloc-ed too much after bgc started"));
12774 bgc_alloc_spin_loh = (uint32_t)(((float)bgc_loh_size_increased / (float)bgc_begin_loh_size) * 10);
12778 #endif //BACKGROUND_GC
12780 size_t gc_heap::get_large_seg_size (size_t size)
12782 size_t default_seg_size = min_loh_segment_size;
12783 #ifdef SEG_MAPPING_TABLE
12784 size_t align_size = default_seg_size;
12785 #else //SEG_MAPPING_TABLE
12786 size_t align_size = default_seg_size / 2;
12787 #endif //SEG_MAPPING_TABLE
12788 int align_const = get_alignment_constant (FALSE);
12789 size_t large_seg_size = align_on_page (
12790 max (default_seg_size,
12791 ((size + 2 * Align(min_obj_size, align_const) + OS_PAGE_SIZE +
12792 align_size) / align_size * align_size)));
12793 return large_seg_size;
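// E.g. (illustrative, SEG_MAPPING_TABLE, min_loh_segment_size == 128MB):
// a 200MB request computes (200MB + overhead + 128MB) / 128MB * 128MB
// == 256MB; requests that fit in the default just get 128MB.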
12796 BOOL gc_heap::loh_get_new_seg (generation* gen,
12799 BOOL* did_full_compact_gc,
12802 UNREFERENCED_PARAMETER(gen);
12803 UNREFERENCED_PARAMETER(align_const);
12805 *did_full_compact_gc = FALSE;
12807 size_t seg_size = get_large_seg_size (size);
12809 heap_segment* new_seg = get_large_segment (seg_size, did_full_compact_gc);
12813 loh_alloc_since_cg += seg_size;
12820 return (new_seg != 0);
12823 // PERF TODO: this is too aggressive; also with a hard limit we should
12824 // count the actual allocated bytes instead of only updating it when
12825 // we get a new seg.
12826 BOOL gc_heap::retry_full_compact_gc (size_t size)
12828 size_t seg_size = get_large_seg_size (size);
12830 if (loh_alloc_since_cg >= (2 * (uint64_t)seg_size))
12835 #ifdef MULTIPLE_HEAPS
12836 uint64_t total_alloc_size = 0;
12837 for (int i = 0; i < n_heaps; i++)
12839 total_alloc_size += g_heaps[i]->loh_alloc_since_cg;
12842 if (total_alloc_size >= (2 * (uint64_t)seg_size))
12846 #endif //MULTIPLE_HEAPS
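// Net effect (illustrative): a retry is productive only once LOH allocation
// since the last compacting GC reaches two prospective segments' worth of
// bytes - per heap via loh_alloc_since_cg, or summed over g_heaps.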
12851 BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
12852 BOOL* did_full_compact_gc,
12855 BOOL bgc_in_progress = FALSE;
12856 *did_full_compact_gc = FALSE;
12857 #ifdef BACKGROUND_GC
12858 if (recursive_gc_sync::background_running_p())
12860 bgc_in_progress = TRUE;
12861 size_t last_full_compact_gc_count = get_full_compact_gc_count();
12862 wait_for_background (awr, loh_p);
12863 size_t current_full_compact_gc_count = get_full_compact_gc_count();
12864 if (current_full_compact_gc_count > last_full_compact_gc_count)
12866 *did_full_compact_gc = TRUE;
12869 #endif //BACKGROUND_GC
12871 return bgc_in_progress;
12874 BOOL gc_heap::loh_try_fit (int gen_number,
12876 alloc_context* acontext,
12878 BOOL* commit_failed_p,
12881 BOOL can_allocate = TRUE;
12883 if (!a_fit_free_list_large_p (size, acontext, align_const))
12885 can_allocate = loh_a_fit_segment_end_p (gen_number, size,
12886 acontext, align_const,
12887 commit_failed_p, oom_r);
12889 #ifdef BACKGROUND_GC
12890 if (can_allocate && recursive_gc_sync::background_running_p())
12892 bgc_loh_size_increased += size;
12894 #endif //BACKGROUND_GC
12896 #ifdef BACKGROUND_GC
12899 if (recursive_gc_sync::background_running_p())
12901 bgc_loh_allocated_in_free += size;
12904 #endif //BACKGROUND_GC
12906 return can_allocate;
12909 BOOL gc_heap::trigger_full_compact_gc (gc_reason gr,
12913 BOOL did_full_compact_gc = FALSE;
12915 size_t last_full_compact_gc_count = get_full_compact_gc_count();
12917 // Set this so the next GC will be a full compacting GC.
12918 if (!last_gc_before_oom)
12920 last_gc_before_oom = TRUE;
12923 #ifdef BACKGROUND_GC
12924 if (recursive_gc_sync::background_running_p())
12926 wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p);
12927 dprintf (2, ("waited for BGC - done"));
12929 #endif //BACKGROUND_GC
12931 GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
12932 size_t current_full_compact_gc_count = get_full_compact_gc_count();
12933 if (current_full_compact_gc_count > last_full_compact_gc_count)
12935 dprintf (3, ("a full compacting GC triggered while waiting for BGC (%d->%d)", last_full_compact_gc_count, current_full_compact_gc_count));
12936 assert (current_full_compact_gc_count > last_full_compact_gc_count);
12937 did_full_compact_gc = TRUE;
12941 dprintf (3, ("h%d full GC", heap_number));
12943 trigger_gc_for_alloc (max_generation, gr, msl, loh_p, mt_t_full_gc);
12945 current_full_compact_gc_count = get_full_compact_gc_count();
12947 if (current_full_compact_gc_count == last_full_compact_gc_count)
12949 dprintf (2, ("attempted to trigger a full compacting GC but didn't get it"));
12950 // We requested a full GC but didn't get one because of the elevation logic
12951 // which means we should fail.
12952 *oom_r = oom_unproductive_full_gc;
12956 dprintf (3, ("h%d: T full compacting GC (%d->%d)",
12958 last_full_compact_gc_count,
12959 current_full_compact_gc_count));
12961 assert (current_full_compact_gc_count > last_full_compact_gc_count);
12962 did_full_compact_gc = TRUE;
12966 return did_full_compact_gc;
12969 #ifdef RECORD_LOH_STATE
12970 void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id)
12972 // When the state is can_allocate we already have released the more
12973 // space lock. So we are not logging states here since this code
12974 // is not thread safe.
12975 if (loh_state_to_save != a_state_can_allocate)
12977 last_loh_states[loh_state_index].alloc_state = loh_state_to_save;
12978 last_loh_states[loh_state_index].thread_id = thread_id;
12981 if (loh_state_index == max_saved_loh_states)
12983 loh_state_index = 0;
12986 assert (loh_state_index < max_saved_loh_states);
12989 #endif //RECORD_LOH_STATE
12991 bool gc_heap::should_retry_other_heap (size_t size)
12993 #ifdef MULTIPLE_HEAPS
12994 if (heap_hard_limit)
12996 size_t total_heap_committed_recorded =
12997 current_total_committed - current_total_committed_bookkeeping;
12998 size_t min_size = dd_min_size (g_heaps[0]->dynamic_data_of (max_generation + 1));
12999 size_t slack_space = max (commit_min_th, min_size);
13000 bool retry_p = ((total_heap_committed_recorded + size) < (heap_hard_limit - slack_space));
13001 dprintf (1, ("%Id - %Id - total committed %Id - size %Id = %Id, %s",
13002 heap_hard_limit, slack_space, total_heap_committed_recorded, size,
13003 (heap_hard_limit - slack_space - total_heap_committed_recorded - size),
13004 (retry_p ? "retry" : "no retry")));
13008 #endif //MULTIPLE_HEAPS
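// E.g. (illustrative): heap_hard_limit == 1GB and slack_space == 16MB allow
// a retry only while committed + size < 1008MB, so at 980MB committed a
// 50MB request fails over to OOM while a 20MB request retries another heap.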
13014 allocation_state gc_heap::allocate_large (int gen_number,
13016 alloc_context* acontext,
13019 #ifdef BACKGROUND_GC
13020 if (recursive_gc_sync::background_running_p())
13022 background_loh_alloc_count++;
13023 //if ((background_loh_alloc_count % bgc_alloc_spin_count_loh) == 0)
13025 if (bgc_loh_should_allocate())
13027 if (!bgc_alloc_spin_loh)
13029 add_saved_spinlock_info (true, me_release, mt_alloc_large);
13030 leave_spin_lock (&more_space_lock_loh);
13031 bool cooperative_mode = enable_preemptive();
13032 GCToOSInterface::YieldThread (bgc_alloc_spin_loh);
13033 disable_preemptive (cooperative_mode);
13034 enter_spin_lock (&more_space_lock_loh);
13035 add_saved_spinlock_info (true, me_acquire, mt_alloc_large);
13036 dprintf (SPINLOCK_LOG, ("[%d]spin Emsl loh", heap_number));
13041 wait_for_background (awr_loh_alloc_during_bgc, true);
13045 #endif //BACKGROUND_GC
13047 gc_reason gr = reason_oos_loh;
13048 generation* gen = generation_of (gen_number);
13049 oom_reason oom_r = oom_no_failure;
13050 size_t current_full_compact_gc_count = 0;
13052 // No variable values should be "carried over" from one state to the other.
13053 // That's why there are local variables for each state.
13054 allocation_state loh_alloc_state = a_state_start;
13055 #ifdef RECORD_LOH_STATE
13056 EEThreadId current_thread_id;
13057 current_thread_id.SetToCurrentThread();
13058 #endif //RECORD_LOH_STATE
13060 // If we can get a new seg it means allocation will succeed.
13063 dprintf (3, ("[h%d]loh state is %s", heap_number, allocation_state_str[loh_alloc_state]));
13065 #ifdef RECORD_LOH_STATE
13066 add_saved_loh_state (loh_alloc_state, current_thread_id);
13067 #endif //RECORD_LOH_STATE
13068 switch (loh_alloc_state)
13070 case a_state_can_allocate:
13071 case a_state_cant_allocate:
13075 case a_state_start:
13077 loh_alloc_state = a_state_try_fit;
13080 case a_state_try_fit:
13082 BOOL commit_failed_p = FALSE;
13083 BOOL can_use_existing_p = FALSE;
13085 can_use_existing_p = loh_try_fit (gen_number, size, acontext,
13086 align_const, &commit_failed_p, &oom_r);
13087 loh_alloc_state = (can_use_existing_p ?
13088 a_state_can_allocate :
13090 a_state_trigger_full_compact_gc :
13091 a_state_acquire_seg));
13092 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13095 case a_state_try_fit_new_seg:
13097 BOOL commit_failed_p = FALSE;
13098 BOOL can_use_existing_p = FALSE;
13100 can_use_existing_p = loh_try_fit (gen_number, size, acontext,
13101 align_const, &commit_failed_p, &oom_r);
13102 // Even after we got a new seg it doesn't necessarily mean we can allocate,
13104 // another LOH allocating thread could have beaten us to acquire the msl so
13104 // we need to try again.
13105 loh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_try_fit);
13106 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13109 case a_state_try_fit_after_cg:
13111 BOOL commit_failed_p = FALSE;
13112 BOOL can_use_existing_p = FALSE;
13114 can_use_existing_p = loh_try_fit (gen_number, size, acontext,
13115 align_const, &commit_failed_p, &oom_r);
13116 // If we failed to commit, we bail right away 'cause we already did a
13117 // full compacting GC.
13118 loh_alloc_state = (can_use_existing_p ?
13119 a_state_can_allocate :
13121 a_state_cant_allocate :
13122 a_state_acquire_seg_after_cg));
13123 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13126 case a_state_try_fit_after_bgc:
13128 BOOL commit_failed_p = FALSE;
13129 BOOL can_use_existing_p = FALSE;
13131 can_use_existing_p = loh_try_fit (gen_number, size, acontext,
13132 align_const, &commit_failed_p, &oom_r);
13133 loh_alloc_state = (can_use_existing_p ?
13134 a_state_can_allocate :
13136 a_state_trigger_full_compact_gc :
13137 a_state_acquire_seg_after_bgc));
13138 assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
13141 case a_state_acquire_seg:
13143 BOOL can_get_new_seg_p = FALSE;
13144 BOOL did_full_compacting_gc = FALSE;
13146 current_full_compact_gc_count = get_full_compact_gc_count();
13148 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
13149 loh_alloc_state = (can_get_new_seg_p ?
13150 a_state_try_fit_new_seg :
13151 (did_full_compacting_gc ?
13152 a_state_check_retry_seg :
13153 a_state_check_and_wait_for_bgc));
13156 case a_state_acquire_seg_after_cg:
13158 BOOL can_get_new_seg_p = FALSE;
13159 BOOL did_full_compacting_gc = FALSE;
13161 current_full_compact_gc_count = get_full_compact_gc_count();
13163 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
13164 // Since we release the msl before we try to allocate a seg, other
13165 // threads could have allocated a bunch of segments before us so
13166 // we might need to retry.
13167 loh_alloc_state = (can_get_new_seg_p ?
13168 a_state_try_fit_after_cg :
13169 a_state_check_retry_seg);
13172 case a_state_acquire_seg_after_bgc:
13174 BOOL can_get_new_seg_p = FALSE;
13175 BOOL did_full_compacting_gc = FALSE;
13177 current_full_compact_gc_count = get_full_compact_gc_count();
13179 can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
13180 loh_alloc_state = (can_get_new_seg_p ?
13181 a_state_try_fit_new_seg :
13182 (did_full_compacting_gc ?
13183 a_state_check_retry_seg :
13184 a_state_trigger_full_compact_gc));
13185 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13188 case a_state_check_and_wait_for_bgc:
13190 BOOL bgc_in_progress_p = FALSE;
13191 BOOL did_full_compacting_gc = FALSE;
13193 bgc_in_progress_p = check_and_wait_for_bgc (awr_loh_oos_bgc, &did_full_compacting_gc, true);
13194 loh_alloc_state = (!bgc_in_progress_p ?
13195 a_state_trigger_full_compact_gc :
13196 (did_full_compacting_gc ?
13197 a_state_try_fit_after_cg :
13198 a_state_try_fit_after_bgc));
13201 case a_state_trigger_full_compact_gc:
13203 if (fgn_maxgen_percent)
13205 dprintf (2, ("FGN: LOH doing last GC before we throw OOM"));
13206 send_full_gc_notification (max_generation, FALSE);
13209 BOOL got_full_compacting_gc = FALSE;
13211 got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, true);
13212 loh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
13213 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13216 case a_state_check_retry_seg:
13218 BOOL should_retry_gc = retry_full_compact_gc (size);
13219 BOOL should_retry_get_seg = FALSE;
13220 if (!should_retry_gc)
13222 size_t last_full_compact_gc_count = current_full_compact_gc_count;
13223 current_full_compact_gc_count = get_full_compact_gc_count();
13224 if (current_full_compact_gc_count > last_full_compact_gc_count)
13226 should_retry_get_seg = TRUE;
13230 loh_alloc_state = (should_retry_gc ?
13231 a_state_trigger_full_compact_gc :
13232 (should_retry_get_seg ?
13233 a_state_try_fit_after_cg :
13234 a_state_cant_allocate));
13235 assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
13240 assert (!"Invalid state!");
13247 if (loh_alloc_state == a_state_cant_allocate)
13249 assert (oom_r != oom_no_failure);
13250 if (should_retry_other_heap (size))
13252 loh_alloc_state = a_state_retry_allocate;
13256 handle_oom (heap_number,
13262 add_saved_spinlock_info (true, me_release, mt_alloc_large_cant);
13263 leave_spin_lock (&more_space_lock_loh);
13266 assert ((loh_alloc_state == a_state_can_allocate) ||
13267 (loh_alloc_state == a_state_cant_allocate) ||
13268 (loh_alloc_state == a_state_retry_allocate));
13269 return loh_alloc_state;
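// The function above is a worked example of the allocation state machine:
// each loop iteration inspects loh_alloc_state, does one unit of work, and
// computes the next state until a terminal state is reached. A minimal
// standalone sketch of the same pattern (illustration only; the names and
// callbacks below are hypothetical, not GC types):
#if 0
enum class alloc_state { start, try_fit, acquire_seg, can_allocate, cant_allocate };

alloc_state run_alloc_state_machine (bool (*try_fit) (), bool (*get_seg) ())
{
    alloc_state s = alloc_state::start;
    while (true)
    {
        switch (s)
        {
            case alloc_state::start:
                s = alloc_state::try_fit;
                break;
            case alloc_state::try_fit:
                // On failure we pick the next recovery state instead of failing outright.
                s = (try_fit () ? alloc_state::can_allocate : alloc_state::acquire_seg);
                break;
            case alloc_state::acquire_seg:
                s = (get_seg () ? alloc_state::try_fit : alloc_state::cant_allocate);
                break;
            case alloc_state::can_allocate:
            case alloc_state::cant_allocate:
                return s;   // terminal states
        }
    }
}
#endif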
13272 // BGC's final mark phase will acquire the msl, so release it here and re-acquire.
13273 void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr,
13274 GCSpinLock* msl, bool loh_p,
13275 msl_take_state take_state)
13277 #ifdef BACKGROUND_GC
13280 add_saved_spinlock_info (loh_p, me_release, take_state);
13281 leave_spin_lock (msl);
13283 #endif //BACKGROUND_GC
13285 vm_heap->GarbageCollectGeneration (gen_number, gr);
13287 #ifdef MULTIPLE_HEAPS
13290 enter_spin_lock (msl);
13291 add_saved_spinlock_info (loh_p, me_acquire, take_state);
13293 #endif //MULTIPLE_HEAPS
13295 #ifdef BACKGROUND_GC
13298 enter_spin_lock (msl);
13299 add_saved_spinlock_info (loh_p, me_acquire, take_state);
13301 #endif //BACKGROUND_GC
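// trigger_gc_for_alloc above follows the standard pattern for performing a
// blocking operation while holding a spin lock: release, block, re-acquire.
// A minimal sketch of the pattern (illustration only; spin_lock_t and
// blocking_op are hypothetical stand-ins for the msl and the GC):
#if 0
#include <atomic>

struct spin_lock_t
{
    std::atomic_flag taken = ATOMIC_FLAG_INIT;
    void enter () { while (taken.test_and_set (std::memory_order_acquire)) { /* spin */ } }
    void leave () { taken.clear (std::memory_order_release); }
};

void do_blocking_op_under_lock (spin_lock_t* lock, void (*blocking_op) ())
{
    lock->leave ();     // never block while holding a spin lock
    blocking_op ();     // e.g. a garbage collection
    lock->enter ();     // caller must revalidate any state guarded by the lock
}
#endif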
13304 allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
13307 if (gc_heap::gc_started)
13309 wait_for_gc_done();
13310 return a_state_retry_allocate;
13313 bool loh_p = (gen_number > 0);
13314 GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
13316 #ifdef SYNCHRONIZATION_STATS
13317 int64_t msl_acquire_start = GCToOSInterface::QueryPerformanceCounter();
13318 #endif //SYNCHRONIZATION_STATS
13319 enter_spin_lock (msl);
13320 add_saved_spinlock_info (loh_p, me_acquire, mt_try_alloc);
13321 dprintf (SPINLOCK_LOG, ("[%d]Emsl for alloc", heap_number));
13322 #ifdef SYNCHRONIZATION_STATS
13323 int64_t msl_acquire = GCToOSInterface::QueryPerformanceCounter() - msl_acquire_start;
13324 total_msl_acquire += msl_acquire;
13325 num_msl_acquired++;
13326 if (msl_acquire > 200)
13328 num_high_msl_acquire++;
13332 num_low_msl_acquire++;
13334 #endif //SYNCHRONIZATION_STATS
13337 // We are commenting this out 'cause we don't see the point - we already
13338 // checked gc_started when we were acquiring the msl - no need to check
13339 // again. This complicates the logic in bgc_suspend_EE 'cause that one would
13340 // need to release the msl, which causes all sorts of trouble.
13341 if (gc_heap::gc_started)
13343 #ifdef SYNCHRONIZATION_STATS
13345 #endif //SYNCHRONIZATION_STATS
13346 BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0;
13349 //Rendezvous early (MP scaling issue)
13350 //dprintf (1, ("[%d]waiting for gc", heap_number));
13351 wait_for_gc_done();
13352 #ifdef MULTIPLE_HEAPS
13354 #endif //MULTIPLE_HEAPS
13359 dprintf (3, ("requested to allocate %d bytes on gen%d", size, gen_number));
13361 int align_const = get_alignment_constant (gen_number != (max_generation+1));
13363 if (fgn_maxgen_percent)
13365 check_for_full_gc (gen_number, size);
13368 if (!(new_allocation_allowed (gen_number)))
13370 if (fgn_maxgen_percent && (gen_number == 0))
13372 // We only check gen0 every so often, so take this opportunity to check again.
13373 check_for_full_gc (gen_number, size);
13376 #ifdef BACKGROUND_GC
13377 wait_for_bgc_high_memory (awr_gen0_alloc, loh_p);
13378 #endif //BACKGROUND_GC
13380 #ifdef SYNCHRONIZATION_STATS
13382 #endif //SYNCHRONIZATION_STATS
13383 dprintf (/*100*/ 2, ("running out of budget on gen%d, gc", gen_number));
13385 if (!settings.concurrent || (gen_number == 0))
13387 trigger_gc_for_alloc (0, ((gen_number == 0) ? reason_alloc_soh : reason_alloc_loh),
13388 msl, loh_p, mt_try_budget);
13392 allocation_state can_allocate = ((gen_number == 0) ?
13393 allocate_small (gen_number, size, acontext, align_const) :
13394 allocate_large (gen_number, size, acontext, align_const));
13396 if (can_allocate == a_state_can_allocate)
13398 size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
13399 int etw_allocation_index = ((gen_number == 0) ? 0 : 1);
13401 etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
13403 allocated_since_last_gc += alloc_context_bytes;
13405 if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
13407 #ifdef FEATURE_REDHAWK
13408 FIRE_EVENT(GCAllocationTick_V1, (uint32_t)etw_allocation_running_amount[etw_allocation_index],
13409 (gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh);
13411 // Unfortunately some of the ETW macros do not check whether the ETW feature is enabled.
13412 // The ones that do are much less efficient.
13413 #if defined(FEATURE_EVENT_TRACE)
13414 if (EVENT_ENABLED(GCAllocationTick_V3))
13416 fire_etw_allocation_event (etw_allocation_running_amount[etw_allocation_index], gen_number, acontext->alloc_ptr);
13418 #endif //FEATURE_EVENT_TRACE
13419 #endif //FEATURE_REDHAWK
13420 etw_allocation_running_amount[etw_allocation_index] = 0;
13424 return can_allocate;
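// The allocation-tick accounting above fires an ETW event only when the
// accumulated amount crosses etw_allocation_tick, so event volume scales
// with bytes allocated rather than with the number of allocations. Sketch
// of that accumulation (illustration only, hypothetical names):
#if 0
#include <cstddef>

void note_allocation (size_t bytes, size_t* running_amount, size_t tick_threshold,
                      void (*fire_tick_event) (size_t))
{
    *running_amount += bytes;
    if (*running_amount > tick_threshold)
    {
        fire_tick_event (*running_amount);
        *running_amount = 0;    // start a new accumulation window
    }
}
#endif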
13427 #ifdef MULTIPLE_HEAPS
13428 void gc_heap::balance_heaps (alloc_context* acontext)
13430 if (acontext->alloc_count < 4)
13432 if (acontext->alloc_count == 0)
13434 acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, 0) ));
13435 gc_heap* hp = acontext->get_home_heap()->pGenGCHeap;
13436 dprintf (3, ("First allocation for context %Ix on heap %d\n", (size_t)acontext, (size_t)hp->heap_number));
13437 acontext->set_alloc_heap(acontext->get_home_heap());
13438 hp->alloc_context_count++;
13443 BOOL set_home_heap = FALSE;
13446 if (heap_select::can_find_heap_fast())
13448 if (acontext->get_home_heap() != NULL)
13449 hint = acontext->get_home_heap()->pGenGCHeap->heap_number;
13450 if (acontext->get_home_heap() != GCHeap::GetHeap(hint = heap_select::select_heap(acontext, hint)) || ((acontext->alloc_count & 15) == 0))
13452 set_home_heap = TRUE;
13458 if ((acontext->alloc_count & 3) == 0)
13459 set_home_heap = TRUE;
13465 // Since we are balancing up to MAX_SUPPORTED_CPUS, no need for this.
13466 if (n_heaps > MAX_SUPPORTED_CPUS)
13468 // on machines with many processors cache affinity is really king, so don't even try
13469 // to balance on these.
13470 acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext, hint) );
13471 acontext->alloc_heap = acontext->home_heap;
13476 gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
13478 dynamic_data* dd = org_hp->dynamic_data_of (0);
13479 ptrdiff_t org_size = dd_new_allocation (dd);
13480 int org_alloc_context_count;
13481 int max_alloc_context_count;
13483 ptrdiff_t max_size;
13484 size_t delta = dd_min_size (dd)/4;
13486 int start, end, finish;
13487 heap_select::get_heap_range_for_heap(org_hp->heap_number, &start, &end);
13488 finish = start + n_heaps;
13494 max_size = org_size + delta;
13495 acontext->set_home_heap(GCHeap::GetHeap( heap_select::select_heap(acontext, hint) ));
13497 if (org_hp == acontext->get_home_heap()->pGenGCHeap)
13498 max_size = max_size + delta;
13500 org_alloc_context_count = org_hp->alloc_context_count;
13501 max_alloc_context_count = org_alloc_context_count;
13502 if (max_alloc_context_count > 1)
13503 max_size /= max_alloc_context_count;
13505 for (int i = start; i < end; i++)
13507 gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
13508 dd = hp->dynamic_data_of (0);
13509 ptrdiff_t size = dd_new_allocation (dd);
13510 if (hp == acontext->get_home_heap()->pGenGCHeap)
13511 size = size + delta;
13512 int hp_alloc_context_count = hp->alloc_context_count;
13513 if (hp_alloc_context_count > 0)
13514 size /= (hp_alloc_context_count + 1);
13515 if (size > max_size)
13519 max_alloc_context_count = hp_alloc_context_count;
13523 while (org_alloc_context_count != org_hp->alloc_context_count ||
13524 max_alloc_context_count != max_hp->alloc_context_count);
13526 if ((max_hp == org_hp) && (end < finish))
13528 start = end; end = finish;
13529 delta = dd_min_size(dd)/2; // Make it twice as hard to balance to remote nodes on NUMA.
13533 if (max_hp != org_hp)
13535 org_hp->alloc_context_count--;
13536 max_hp->alloc_context_count++;
13537 acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
13538 if (!gc_thread_no_affinitize_p)
13540 uint16_t src_proc_no = heap_select::find_proc_no_from_heap_no(org_hp->heap_number);
13541 uint16_t dst_proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
13543 if (!GCToOSInterface::SetCurrentThreadIdealAffinity(src_proc_no, dst_proc_no))
13545 dprintf (3, ("Failed to set the ideal processor for heap %d.",
13546 org_hp->heap_number));
13549 dprintf (3, ("Switching context %p (home heap %d) ",
13551 acontext->get_home_heap()->pGenGCHeap->heap_number));
13552 dprintf (3, (" from heap %d (%Id free bytes, %d contexts) ",
13553 org_hp->heap_number,
13555 org_alloc_context_count));
13556 dprintf (3, (" to heap %d (%Id free bytes, %d contexts)\n",
13557 max_hp->heap_number,
13558 dd_new_allocation(max_hp->dynamic_data_of(0)),
13559 max_alloc_context_count));
13564 acontext->alloc_count++;
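// balance_heaps above moves an allocation context to the heap whose remaining
// gen0 budget, divided among the contexts already attached to it, is largest;
// the delta handicap means we only move when the win is meaningful, and
// balance_heaps_loh below applies the same idea to the LOH budget. Sketch of
// the scoring loop (illustration only, hypothetical names):
#if 0
#include <cstddef>

int pick_best_heap (const ptrdiff_t* budget, const int* context_count,
                    int n_heaps, int org_heap, ptrdiff_t delta)
{
    // Handicap in favor of staying put so small differences don't cause churn.
    ptrdiff_t max_score = budget[org_heap] + delta;
    int best = org_heap;
    for (int i = 0; i < n_heaps; i++)
    {
        // More contexts sharing a heap means less budget for each of them.
        ptrdiff_t score = budget[i] / (ptrdiff_t)(context_count[i] + 1);
        if (score > max_score)
        {
            max_score = score;
            best = i;
        }
    }
    return best;
}
#endif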
13567 gc_heap* gc_heap::balance_heaps_loh (alloc_context* acontext, size_t alloc_size)
13569 gc_heap* org_hp = acontext->get_alloc_heap()->pGenGCHeap;
13570 dprintf (3, ("[h%d] LA: %Id", org_hp->heap_number, alloc_size));
13572 //if (size > 128*1024)
13575 dynamic_data* dd = org_hp->dynamic_data_of (max_generation + 1);
13577 ptrdiff_t org_size = dd_new_allocation (dd);
13579 ptrdiff_t max_size;
13580 size_t delta = dd_min_size (dd) * 4;
13582 int start, end, finish;
13583 heap_select::get_heap_range_for_heap(org_hp->heap_number, &start, &end);
13584 finish = start + n_heaps;
13589 max_size = org_size + delta;
13590 dprintf (3, ("orig hp: %d, max size: %d",
13591 org_hp->heap_number,
13594 for (int i = start; i < end; i++)
13596 gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
13597 dd = hp->dynamic_data_of (max_generation + 1);
13598 ptrdiff_t size = dd_new_allocation (dd);
13599 dprintf (3, ("hp: %d, size: %d",
13602 if (size > max_size)
13606 dprintf (3, ("max hp: %d, max size: %d",
13607 max_hp->heap_number,
13613 if ((max_hp == org_hp) && (end < finish))
13615 start = end; end = finish;
13616 delta = dd_min_size(dd) * 4; // TODO: tune delta
13620 if (max_hp != org_hp)
13622 dprintf (3, ("loh: %d(%Id)->%d(%Id)",
13623 org_hp->heap_number, dd_new_allocation (org_hp->dynamic_data_of (max_generation + 1)),
13624 max_hp->heap_number, dd_new_allocation (max_hp->dynamic_data_of (max_generation + 1))));
13634 #endif //MULTIPLE_HEAPS
13636 BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
13637 int alloc_generation_number)
13639 allocation_state status;
13642 #ifdef MULTIPLE_HEAPS
13643 if (alloc_generation_number == 0)
13645 balance_heaps (acontext);
13646 status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, alloc_generation_number);
13650 gc_heap* alloc_heap = balance_heaps_loh (acontext, size);
13651 status = alloc_heap->try_allocate_more_space (acontext, size, alloc_generation_number);
13652 if (status == a_state_retry_allocate)
13654 dprintf (3, ("LOH h%d alloc retry!", alloc_heap->heap_number));
13658 status = try_allocate_more_space (acontext, size, alloc_generation_number);
13659 #endif //MULTIPLE_HEAPS
13661 while (status == a_state_retry_allocate);
13663 return (status == a_state_can_allocate);
13667 CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext)
13669 size_t size = Align (jsize);
13670 assert (size >= Align (min_obj_size));
13673 uint8_t* result = acontext->alloc_ptr;
13674 acontext->alloc_ptr+=size;
13675 if (acontext->alloc_ptr <= acontext->alloc_limit)
13677 CObjectHeader* obj = (CObjectHeader*)result;
13683 acontext->alloc_ptr -= size;
13686 #pragma inline_depth(0)
13689 if (! allocate_more_space (acontext, size, 0))
13693 #pragma inline_depth(20)
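// gc_heap::allocate above is the classic bump-pointer fast path: advance
// alloc_ptr and only fall into allocate_more_space when it crosses
// alloc_limit. The pattern in isolation (illustration only; bump_context is
// a hypothetical stand-in for alloc_context):
#if 0
#include <cstdint>
#include <cstddef>

struct bump_context { uint8_t* alloc_ptr; uint8_t* alloc_limit; };

void* bump_alloc (bump_context* c, size_t size /* already aligned */)
{
    uint8_t* result = c->alloc_ptr;
    c->alloc_ptr += size;
    if (c->alloc_ptr <= c->alloc_limit)
        return result;      // fast path: no locks, just a pointer bump
    c->alloc_ptr -= size;   // undo; the caller must refill the context
    return nullptr;         // slow path needed
}
#endif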
13701 void gc_heap::leave_allocation_segment (generation* gen)
13703 adjust_limit (0, 0, gen, max_generation);
13706 void gc_heap::init_free_and_plug()
13708 #ifdef FREE_USAGE_STATS
13709 for (int i = 0; i <= settings.condemned_generation; i++)
13711 generation* gen = generation_of (i);
13712 memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces));
13713 memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
13714 memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces));
13717 if (settings.condemned_generation != max_generation)
13719 for (int i = (settings.condemned_generation + 1); i <= max_generation; i++)
13721 generation* gen = generation_of (i);
13722 memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
13725 #endif //FREE_USAGE_STATS
13728 void gc_heap::print_free_and_plug (const char* msg)
13730 #if defined(FREE_USAGE_STATS) && defined(SIMPLE_DPRINTF)
13731 int older_gen = ((settings.condemned_generation == max_generation) ? max_generation : (settings.condemned_generation + 1));
13732 for (int i = 0; i <= older_gen; i++)
13734 generation* gen = generation_of (i);
13735 for (int j = 0; j < NUM_GEN_POWER2; j++)
13737 if ((gen->gen_free_spaces[j] != 0) || (gen->gen_plugs[j] != 0))
13739 dprintf (2, ("[%s][h%d][%s#%d]gen%d: 2^%d: F: %Id, P: %Id",
13742 (settings.concurrent ? "BGC" : "GC"),
13745 (j + 9), gen->gen_free_spaces[j], gen->gen_plugs[j]));
13750 UNREFERENCED_PARAMETER(msg);
13751 #endif //FREE_USAGE_STATS && SIMPLE_DPRINTF
13754 void gc_heap::add_gen_plug (int gen_number, size_t plug_size)
13756 #ifdef FREE_USAGE_STATS
13757 dprintf (3, ("adding plug size %Id to gen%d", plug_size, gen_number));
13758 generation* gen = generation_of (gen_number);
13759 size_t sz = BASE_GEN_SIZE;
13762 for (; i < NUM_GEN_POWER2; i++)
13764 if (plug_size < sz)
13771 (gen->gen_plugs[i])++;
13773 UNREFERENCED_PARAMETER(gen_number);
13774 UNREFERENCED_PARAMETER(plug_size);
13775 #endif //FREE_USAGE_STATS
13778 void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size)
13780 #ifdef FREE_USAGE_STATS
13781 generation* gen = generation_of (gen_number);
13782 size_t sz = BASE_GEN_SIZE;
13785 for (; i < NUM_GEN_POWER2; i++)
13787 if (free_size < sz)
13794 (gen->gen_current_pinned_free_spaces[i])++;
13795 generation_pinned_free_obj_space (gen) += free_size;
13796 dprintf (3, ("left pin free %Id(2^%d) to gen%d, total %Id bytes (%Id)",
13797 free_size, (i + 10), gen_number,
13798 generation_pinned_free_obj_space (gen),
13799 gen->gen_current_pinned_free_spaces[i]));
13801 UNREFERENCED_PARAMETER(gen_number);
13802 UNREFERENCED_PARAMETER(free_size);
13803 #endif //FREE_USAGE_STATS
13806 void gc_heap::add_gen_free (int gen_number, size_t free_size)
13808 #ifdef FREE_USAGE_STATS
13809 dprintf (3, ("adding free size %Id to gen%d", free_size, gen_number));
13810 generation* gen = generation_of (gen_number);
13811 size_t sz = BASE_GEN_SIZE;
13814 for (; i < NUM_GEN_POWER2; i++)
13816 if (free_size < sz)
13823 (gen->gen_free_spaces[i])++;
13825 UNREFERENCED_PARAMETER(gen_number);
13826 UNREFERENCED_PARAMETER(free_size);
13827 #endif //FREE_USAGE_STATS
13830 void gc_heap::remove_gen_free (int gen_number, size_t free_size)
13832 #ifdef FREE_USAGE_STATS
13833 dprintf (3, ("removing free %Id from gen%d", free_size, gen_number));
13834 generation* gen = generation_of (gen_number);
13835 size_t sz = BASE_GEN_SIZE;
13838 for (; i < NUM_GEN_POWER2; i++)
13840 if (free_size < sz)
13847 (gen->gen_free_spaces[i])--;
13849 UNREFERENCED_PARAMETER(gen_number);
13850 UNREFERENCED_PARAMETER(free_size);
13851 #endif //FREE_USAGE_STATS
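// add_gen_plug/add_gen_free/remove_gen_free above all map a size to a
// power-of-2 histogram bucket by doubling a threshold until the size fits.
// Sketch of the bucketing (illustration only; base and num_buckets stand in
// for BASE_GEN_SIZE and NUM_GEN_POWER2):
#if 0
#include <cstddef>

int size_to_bucket (size_t size, size_t base, int num_buckets)
{
    size_t sz = base;
    int i = 0;
    for (; i < (num_buckets - 1); i++)
    {
        if (size < sz)
            break;          // bucket i holds sizes below base * 2^i
        sz = sz * 2;
    }
    return i;               // the last bucket catches everything larger
}
#endif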
13854 uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
13855 int from_gen_number,
13856 uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL)
13858 size = Align (size);
13859 assert (size >= Align (min_obj_size));
13860 assert (from_gen_number < max_generation);
13861 assert (from_gen_number >= 0);
13862 assert (generation_of (from_gen_number + 1) == gen);
13864 allocator* gen_allocator = generation_allocator (gen);
13865 BOOL discard_p = gen_allocator->discard_if_no_fit_p ();
13866 int pad_in_front = ((old_loc != 0) && ((from_gen_number+1) != max_generation)) ? USE_PADDING_FRONT : 0;
13868 size_t real_size = size + Align (min_obj_size);
13870 real_size += Align (min_obj_size);
13872 if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
13873 generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)))
13875 size_t sz_list = gen_allocator->first_bucket_size();
13876 for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
13878 if ((real_size < (sz_list / 2)) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
13880 uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
13881 uint8_t* prev_free_item = 0;
13882 while (free_list != 0)
13884 dprintf (3, ("considering free list %Ix", (size_t)free_list));
13886 size_t free_list_size = unused_array_size (free_list);
13888 if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size),
13889 old_loc, USE_PADDING_TAIL | pad_in_front))
13891 dprintf (4, ("F:%Ix-%Id",
13892 (size_t)free_list, free_list_size));
13894 gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, !discard_p);
13895 generation_free_list_space (gen) -= free_list_size;
13896 remove_gen_free (gen->gen_num, free_list_size);
13898 adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
13899 generation_allocate_end_seg_p (gen) = FALSE;
13902 // We do first fit on bucket 0 because we are not guaranteed to find a fit there.
13903 else if (discard_p || (a_l_idx == 0))
13905 dprintf (3, ("couldn't use this free area, discarding"));
13906 generation_free_obj_space (gen) += free_list_size;
13908 gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
13909 generation_free_list_space (gen) -= free_list_size;
13910 remove_gen_free (gen->gen_num, free_list_size);
13914 prev_free_item = free_list;
13916 free_list = free_list_slot (free_list);
13919 sz_list = sz_list * 2;
13921 //go back to the beginning of the segment list
13922 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
13923 if (seg != generation_allocation_segment (gen))
13925 leave_allocation_segment (gen);
13926 generation_allocation_segment (gen) = seg;
13928 while (seg != ephemeral_heap_segment)
13930 if (size_fit_p(size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
13931 heap_segment_committed (seg), old_loc, USE_PADDING_TAIL | pad_in_front))
13933 dprintf (3, ("using what's left in committed"));
13934 adjust_limit (heap_segment_plan_allocated (seg),
13935 heap_segment_committed (seg) -
13936 heap_segment_plan_allocated (seg),
13937 gen, from_gen_number+1);
13938 generation_allocate_end_seg_p (gen) = TRUE;
13939 // dformat (t, 3, "Expanding segment allocation");
13940 heap_segment_plan_allocated (seg) =
13941 heap_segment_committed (seg);
13946 if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
13947 heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
13948 grow_heap_segment (seg, heap_segment_plan_allocated (seg), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG))
13950 dprintf (3, ("using what's left in reserved"));
13951 adjust_limit (heap_segment_plan_allocated (seg),
13952 heap_segment_committed (seg) -
13953 heap_segment_plan_allocated (seg),
13954 gen, from_gen_number+1);
13955 generation_allocate_end_seg_p (gen) = TRUE;
13956 heap_segment_plan_allocated (seg) =
13957 heap_segment_committed (seg);
13963 leave_allocation_segment (gen);
13964 heap_segment* next_seg = heap_segment_next_rw (seg);
13967 dprintf (3, ("getting next segment"));
13968 generation_allocation_segment (gen) = next_seg;
13969 generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
13970 generation_allocation_limit (gen) = generation_allocation_pointer (gen);
13979 seg = generation_allocation_segment (gen);
13981 //No need to fix the last region. Will be done later
13992 uint8_t* result = generation_allocation_pointer (gen);
13996 if ((pad_in_front & USE_PADDING_FRONT) &&
13997 (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
13998 ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14000 pad = Align (min_obj_size);
14001 set_plug_padded (old_loc);
14003 #endif //SHORT_PLUGS
14005 #ifdef FEATURE_STRUCTALIGN
14006 _ASSERTE(!old_loc || alignmentOffset != 0);
14007 _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14010 size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14011 set_node_aligninfo (old_loc, requiredAlignment, pad1);
14014 #else // FEATURE_STRUCTALIGN
14015 if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14017 pad += switch_alignment_size (is_plug_padded (old_loc));
14018 set_node_realigned (old_loc);
14019 dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14020 (size_t)old_loc, (size_t)(result+pad)));
14021 assert (same_large_alignment_p (result + pad, old_loc));
14023 #endif // FEATURE_STRUCTALIGN
14024 dprintf (3, ("Allocate %Id bytes", size));
14026 if ((old_loc == 0) || (pad != 0))
14028 //allocating a non plug or a gap, so reset the start region
14029 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14032 generation_allocation_pointer (gen) += size + pad;
14033 assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14034 if (generation_allocate_end_seg_p (gen))
14036 generation_end_seg_allocated (gen) += size;
14040 generation_free_list_allocated (gen) += size;
14042 generation_allocation_size (gen) += size;
14044 dprintf (3, ("aio: ptr: %Ix, limit: %Ix, sr: %Ix",
14045 generation_allocation_pointer (gen), generation_allocation_limit (gen),
14046 generation_allocation_context_start_region (gen)));
14048 return result + pad;
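// allocate_in_older_generation above first walks the size-bucketed free
// lists (skipping buckets whose items are too small for the request), then
// falls back to committed and reserved tail space on the segments. Sketch of
// the bucketed first-fit scan (illustration only; free_node and the bucket
// layout are hypothetical simplifications of the allocator class):
#if 0
#include <cstddef>

struct free_node { free_node* next; size_t size; };

free_node* first_fit (free_node* const* buckets, const size_t* bucket_limit,
                      int n_buckets, size_t size)
{
    for (int b = 0; b < n_buckets; b++)
    {
        // bucket_limit[b] is the exclusive upper bound on item sizes in
        // bucket b; skip buckets that can never satisfy the request. The
        // last bucket is unbounded, so it is always worth searching.
        if ((size >= bucket_limit[b]) && (b != (n_buckets - 1)))
            continue;
        for (free_node* n = buckets[b]; n != nullptr; n = n->next)
        {
            if (n->size >= size)
                return n;   // caller unlinks it and allocates inside it
        }
    }
    return nullptr;
}
#endif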
14052 void gc_heap::repair_allocation_in_expanded_heap (generation* consing_gen)
14054 //make sure that every generation has a planned allocation start
14055 int gen_number = max_generation - 1;
14056 while (gen_number >= 0)
14058 generation* gen = generation_of (gen_number);
14059 if (0 == generation_plan_allocation_start (gen))
14061 realloc_plan_generation_start (gen, consing_gen);
14063 assert (generation_plan_allocation_start (gen));
14068 // now we know the planned allocation size
14069 size_t size = (generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
14070 heap_segment* seg = generation_allocation_segment (consing_gen);
14071 if (generation_allocation_limit (consing_gen) == heap_segment_plan_allocated (seg))
14075 heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
14080 assert (settings.condemned_generation == max_generation);
14081 uint8_t* first_address = generation_allocation_limit (consing_gen);
14082 //look through the pinned plugs for relevant ones.
14083 //Look for the right pinned plug to start from.
14086 while (mi != mark_stack_tos)
14088 m = pinned_plug_of (mi);
14089 if ((pinned_plug (m) == first_address))
14094 assert (mi != mark_stack_tos);
14095 pinned_len (m) = size;
14099 // TODO (defrag): optimize for new segment (plan_allocated == mem)
14100 uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen,
14105 BOOL set_padding_on_saved_p,
14106 mark* pinned_plug_entry,
14107 #endif //SHORT_PLUGS
14108 BOOL consider_bestfit,
14109 int active_new_gen_number
14110 REQD_ALIGN_AND_OFFSET_DCL)
14112 UNREFERENCED_PARAMETER(active_new_gen_number);
14113 dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size));
14115 size = Align (size);
14116 assert (size >= Align (min_obj_size));
14117 int pad_in_front = ((old_loc != 0) && (active_new_gen_number != max_generation)) ? USE_PADDING_FRONT : 0;
14119 if (consider_bestfit && use_bestfit)
14121 assert (bestfit_seg);
14122 dprintf (SEG_REUSE_LOG_1, ("reallocating 0x%Ix in expanded heap, size: %Id",
14124 return bestfit_seg->fit (old_loc,
14126 set_padding_on_saved_p,
14128 #endif //SHORT_PLUGS
14129 size REQD_ALIGN_AND_OFFSET_ARG);
14132 heap_segment* seg = generation_allocation_segment (gen);
14134 if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14135 generation_allocation_limit (gen), old_loc,
14136 ((generation_allocation_limit (gen) !=
14137 heap_segment_plan_allocated (seg))? USE_PADDING_TAIL : 0) | pad_in_front)))
14139 dprintf (3, ("aie: can't fit: ptr: %Ix, limit: %Ix", generation_allocation_pointer (gen),
14140 generation_allocation_limit (gen)));
14143 uint8_t* first_address = (generation_allocation_limit (gen) ?
14144 generation_allocation_limit (gen) :
14145 heap_segment_mem (seg));
14146 assert (in_range_for_segment (first_address, seg));
14148 uint8_t* end_address = heap_segment_reserved (seg);
14150 dprintf (3, ("aie: first_addr: %Ix, gen alloc limit: %Ix, end_address: %Ix",
14151 first_address, generation_allocation_limit (gen), end_address));
14156 if (heap_segment_allocated (seg) != heap_segment_mem (seg))
14158 assert (settings.condemned_generation == max_generation);
14159 //look through the pinned plugs for relevant ones.
14160 //Look for the right pinned plug to start from.
14161 while (mi != mark_stack_tos)
14163 m = pinned_plug_of (mi);
14164 if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))
14166 dprintf (3, ("aie: found pin: %Ix", pinned_plug (m)));
14172 if (mi != mark_stack_tos)
14174 //fix old free list.
14175 size_t hsize = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
14177 dprintf(3,("gc filling up hole"));
14178 ptrdiff_t mi1 = (ptrdiff_t)mi;
14179 while ((mi1 >= 0) &&
14180 (pinned_plug (pinned_plug_of(mi1)) != generation_allocation_limit (gen)))
14182 dprintf (3, ("aie: checking pin %Ix", pinned_plug (pinned_plug_of(mi1))));
14187 size_t saved_pinned_len = pinned_len (pinned_plug_of(mi1));
14188 pinned_len (pinned_plug_of(mi1)) = hsize;
14189 dprintf (3, ("changing %Ix len %Ix->%Ix",
14190 pinned_plug (pinned_plug_of(mi1)),
14191 saved_pinned_len, pinned_len (pinned_plug_of(mi1))));
14198 assert (generation_allocation_limit (gen) ==
14199 generation_allocation_pointer (gen));
14200 mi = mark_stack_tos;
14203 while ((mi != mark_stack_tos) && in_range_for_segment (pinned_plug (m), seg))
14205 size_t len = pinned_len (m);
14206 uint8_t* free_list = (pinned_plug (m) - len);
14207 dprintf (3, ("aie: testing free item: %Ix->%Ix(%Ix)",
14208 free_list, (free_list + len), len));
14209 if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + len), old_loc, USE_PADDING_TAIL | pad_in_front))
14211 dprintf (3, ("aie: Found adequate unused area: %Ix, size: %Id",
14212 (size_t)free_list, len));
14214 generation_allocation_pointer (gen) = free_list;
14215 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14216 generation_allocation_limit (gen) = (free_list + len);
14218 goto allocate_in_free;
14221 m = pinned_plug_of (mi);
14224 //switch to the end of the segment.
14225 generation_allocation_pointer (gen) = heap_segment_plan_allocated (seg);
14226 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14227 heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14228 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14229 dprintf (3, ("aie: switching to end of seg: %Ix->%Ix(%Ix)",
14230 generation_allocation_pointer (gen), generation_allocation_limit (gen),
14231 (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
14233 if (!size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14234 generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front))
14236 dprintf (3, ("aie: ptr: %Ix, limit: %Ix, can't alloc", generation_allocation_pointer (gen),
14237 generation_allocation_limit (gen)));
14238 assert (!"Can't allocate if no free space");
14249 uint8_t* result = generation_allocation_pointer (gen);
14253 if ((pad_in_front & USE_PADDING_FRONT) &&
14254 (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
14255 ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14258 pad = Align (min_obj_size);
14259 set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
14261 #endif //SHORT_PLUGS
14263 #ifdef FEATURE_STRUCTALIGN
14264 _ASSERTE(!old_loc || alignmentOffset != 0);
14265 _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14268 size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14269 set_node_aligninfo (old_loc, requiredAlignment, pad1);
14273 #else // FEATURE_STRUCTALIGN
14274 if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14276 pad += switch_alignment_size (is_plug_padded (old_loc));
14277 set_node_realigned (old_loc);
14278 dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14279 (size_t)old_loc, (size_t)(result+pad)));
14280 assert (same_large_alignment_p (result + pad, old_loc));
14283 #endif // FEATURE_STRUCTALIGN
14285 if ((old_loc == 0) || (pad != 0))
14287 //allocating a non plug or a gap, so reset the start region
14288 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14291 generation_allocation_pointer (gen) += size + pad;
14292 assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14293 dprintf (3, ("Allocated in expanded heap %Ix:%Id", (size_t)(result+pad), size));
14295 dprintf (3, ("aie: ptr: %Ix, limit: %Ix, sr: %Ix",
14296 generation_allocation_pointer (gen), generation_allocation_limit (gen),
14297 generation_allocation_context_start_region (gen)));
14299 return result + pad;
14303 generation* gc_heap::ensure_ephemeral_heap_segment (generation* consing_gen)
14305 heap_segment* seg = generation_allocation_segment (consing_gen);
14306 if (seg != ephemeral_heap_segment)
14308 assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (seg));
14309 assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (seg));
14311 //fix the allocated size of the segment.
14312 heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
14314 generation* new_consing_gen = generation_of (max_generation - 1);
14315 generation_allocation_pointer (new_consing_gen) =
14316 heap_segment_mem (ephemeral_heap_segment);
14317 generation_allocation_limit (new_consing_gen) =
14318 generation_allocation_pointer (new_consing_gen);
14319 generation_allocation_context_start_region (new_consing_gen) =
14320 generation_allocation_pointer (new_consing_gen);
14321 generation_allocation_segment (new_consing_gen) = ephemeral_heap_segment;
14323 return new_consing_gen;
14326 return consing_gen;
14329 uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen,
14331 int from_gen_number,
14333 BOOL* convert_to_pinned_p,
14334 uint8_t* next_pinned_plug,
14335 heap_segment* current_seg,
14336 #endif //SHORT_PLUGS
14338 REQD_ALIGN_AND_OFFSET_DCL)
14340 // Make sure that the youngest generation gap hasn't been allocated
14341 if (settings.promotion)
14343 assert (generation_plan_allocation_start (youngest_generation) == 0);
14346 size = Align (size);
14347 assert (size >= Align (min_obj_size));
14348 int to_gen_number = from_gen_number;
14349 if (from_gen_number != (int)max_generation)
14351 to_gen_number = from_gen_number + (settings.promotion ? 1 : 0);
14354 dprintf (3, ("aic gen%d: s: %Id", gen->gen_num, size));
14356 int pad_in_front = ((old_loc != 0) && (to_gen_number != max_generation)) ? USE_PADDING_FRONT : 0;
14358 if ((from_gen_number != -1) && (from_gen_number != (int)max_generation) && settings.promotion)
14360 generation_condemned_allocated (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
14361 generation_allocation_size (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
14365 heap_segment* seg = generation_allocation_segment (gen);
14366 if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14367 generation_allocation_limit (gen), old_loc,
14368 ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))?USE_PADDING_TAIL:0)|pad_in_front)))
14370 if ((! (pinned_plug_que_empty_p()) &&
14371 (generation_allocation_limit (gen) ==
14372 pinned_plug (oldest_pin()))))
14374 size_t entry = deque_pinned_plug();
14375 mark* pinned_plug_entry = pinned_plug_of (entry);
14376 size_t len = pinned_len (pinned_plug_entry);
14377 uint8_t* plug = pinned_plug (pinned_plug_entry);
14378 set_new_pin_info (pinned_plug_entry, generation_allocation_pointer (gen));
14380 #ifdef FREE_USAGE_STATS
14381 generation_allocated_in_pinned_free (gen) += generation_allocated_since_last_pin (gen);
14382 dprintf (3, ("allocated %Id so far within pin %Ix, total->%Id",
14383 generation_allocated_since_last_pin (gen),
14385 generation_allocated_in_pinned_free (gen)));
14386 generation_allocated_since_last_pin (gen) = 0;
14388 add_item_to_current_pinned_free (gen->gen_num, pinned_len (pinned_plug_of (entry)));
14389 #endif //FREE_USAGE_STATS
14391 dprintf (3, ("mark stack bos: %Id, tos: %Id, aic: p %Ix len: %Ix->%Ix",
14392 mark_stack_bos, mark_stack_tos, plug, len, pinned_len (pinned_plug_of (entry))));
14394 assert(mark_stack_array[entry].len == 0 ||
14395 mark_stack_array[entry].len >= Align(min_obj_size));
14396 generation_allocation_pointer (gen) = plug + len;
14397 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14398 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14399 set_allocator_next_pin (gen);
14401 //Add the size of the pinned plug to the right pinned allocations
14402 //find out which gen this pinned plug came from
14403 int frgn = object_gennum (plug);
14404 if ((frgn != (int)max_generation) && settings.promotion)
14406 generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
14407 int togn = object_gennum_plan (plug);
14410 generation_pinned_allocation_compact_size (generation_of (togn)) += len;
14416 if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
14418 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14419 dprintf (3, ("changed limit to plan alloc: %Ix", generation_allocation_limit (gen)));
14423 if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
14425 heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14426 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14427 dprintf (3, ("changed limit to commit: %Ix", generation_allocation_limit (gen)));
14431 #ifndef RESPECT_LARGE_ALIGNMENT
14432 assert (gen != youngest_generation);
14433 #endif //RESPECT_LARGE_ALIGNMENT
14435 if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
14436 heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
14437 (grow_heap_segment (seg, generation_allocation_pointer (gen), old_loc,
14438 size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG)))
14440 dprintf (3, ("Expanded segment allocation by committing more memory"));
14441 heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
14442 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
14446 heap_segment* next_seg = heap_segment_next (seg);
14447 assert (generation_allocation_pointer (gen)>=
14448 heap_segment_mem (seg));
14449 // Verify that all pinned plugs for this segment are consumed
14450 if (!pinned_plug_que_empty_p() &&
14451 ((pinned_plug (oldest_pin()) <
14452 heap_segment_allocated (seg)) &&
14453 (pinned_plug (oldest_pin()) >=
14454 generation_allocation_pointer (gen))))
14456 LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
14457 pinned_plug (oldest_pin())));
14460 assert (generation_allocation_pointer (gen)>=
14461 heap_segment_mem (seg));
14462 assert (generation_allocation_pointer (gen)<=
14463 heap_segment_committed (seg));
14464 heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);
14468 generation_allocation_segment (gen) = next_seg;
14469 generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
14470 generation_allocation_limit (gen) = generation_allocation_pointer (gen);
14471 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14475 return 0; //should only happen during allocation of generation 0 gap
14476 // in that case we are going to grow the heap anyway
14481 set_allocator_next_pin (gen);
14488 assert (generation_allocation_pointer (gen)>=
14489 heap_segment_mem (generation_allocation_segment (gen)));
14490 uint8_t* result = generation_allocation_pointer (gen);
14493 if ((pad_in_front & USE_PADDING_FRONT) &&
14494 (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
14495 ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
14497 ptrdiff_t dist = old_loc - result;
14500 dprintf (3, ("old alloc: %Ix, same as new alloc, not padding", old_loc));
14505 if ((dist > 0) && (dist < (ptrdiff_t)Align (min_obj_size)))
14507 dprintf (3, ("old alloc: %Ix, only %d bytes > new alloc! Shouldn't happen", old_loc, dist));
14511 pad = Align (min_obj_size);
14512 set_plug_padded (old_loc);
14515 #endif //SHORT_PLUGS
14516 #ifdef FEATURE_STRUCTALIGN
14517 _ASSERTE(!old_loc || alignmentOffset != 0);
14518 _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
14519 if ((old_loc != 0))
14521 size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
14522 set_node_aligninfo (old_loc, requiredAlignment, pad1);
14525 #else // FEATURE_STRUCTALIGN
14526 if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
14528 pad += switch_alignment_size (is_plug_padded (old_loc));
14529 set_node_realigned(old_loc);
14530 dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
14531 (size_t)old_loc, (size_t)(result+pad)));
14532 assert (same_large_alignment_p (result + pad, old_loc));
14534 #endif // FEATURE_STRUCTALIGN
14537 if ((next_pinned_plug != 0) && (pad != 0) && (generation_allocation_segment (gen) == current_seg))
14539 assert (old_loc != 0);
14540 ptrdiff_t dist_to_next_pin = (ptrdiff_t)(next_pinned_plug - (generation_allocation_pointer (gen) + size + pad));
14541 assert (dist_to_next_pin >= 0);
14543 if ((dist_to_next_pin >= 0) && (dist_to_next_pin < (ptrdiff_t)Align (min_obj_size)))
14545 dprintf (3, ("%Ix->(%Ix,%Ix),%Ix(%Ix)(%Ix),NP->PP",
14547 generation_allocation_pointer (gen),
14548 generation_allocation_limit (gen),
14551 dist_to_next_pin));
14552 clear_plug_padded (old_loc);
14554 *convert_to_pinned_p = TRUE;
14555 record_interesting_data_point (idp_converted_pin);
14560 #endif //SHORT_PLUGS
14562 if ((old_loc == 0) || (pad != 0))
14564 //allocating a non plug or a gap, so reset the start region
14565 generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
14568 generation_allocation_pointer (gen) += size + pad;
14569 assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
14571 #ifdef FREE_USAGE_STATS
14572 generation_allocated_since_last_pin (gen) += size;
14573 #endif //FREE_USAGE_STATS
14575 dprintf (3, ("aic: ptr: %Ix, limit: %Ix, sr: %Ix",
14576 generation_allocation_pointer (gen), generation_allocation_limit (gen),
14577 generation_allocation_context_start_region (gen)));
14579 assert (result + pad);
14580 return result + pad;
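// When allocate_in_condemned_generations above runs into the oldest pinned
// plug, it dequeues the pin, records how much free space was left in front
// of it (the pin's new len), and resumes planning right after the plug.
// Sketch of that step (illustration only, hypothetical pin layout):
#if 0
#include <cstdint>
#include <cstddef>

struct pin_info { uint8_t* plug; size_t plug_len; size_t gap_before; };

uint8_t* consume_oldest_pin (pin_info* p, uint8_t* alloc_ptr)
{
    // Everything between the allocation pointer and the plug stays free.
    p->gap_before = (size_t)(p->plug - alloc_ptr);
    return p->plug + p->plug_len;   // planning continues after the plug
}
#endif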
14584 inline int power (int x, int y)
14587 for (int i = 0; i < y; i++)
14594 int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
14597 BOOL* blocking_collection_p
14598 STRESS_HEAP_ARG(int n_original))
14600 int n = current_gen;
14601 #ifdef MULTIPLE_HEAPS
14602 BOOL joined_last_gc_before_oom = FALSE;
14603 for (int i = 0; i < n_heaps; i++)
14605 if (g_heaps[i]->last_gc_before_oom)
14607 dprintf (GTC_LOG, ("h%d is setting blocking to TRUE", i));
14608 joined_last_gc_before_oom = TRUE;
14613 BOOL joined_last_gc_before_oom = last_gc_before_oom;
14614 #endif //MULTIPLE_HEAPS
14616 if (joined_last_gc_before_oom && settings.pause_mode != pause_low_latency)
14618 assert (*blocking_collection_p);
14621 if (should_evaluate_elevation && (n == max_generation))
14623 dprintf (GTC_LOG, ("lock: %d(%d)",
14624 (settings.should_lock_elevation ? 1 : 0),
14625 settings.elevation_locked_count));
14627 if (settings.should_lock_elevation)
14629 settings.elevation_locked_count++;
14630 if (settings.elevation_locked_count == 6)
14632 settings.elevation_locked_count = 0;
14636 n = max_generation - 1;
14637 settings.elevation_reduced = TRUE;
14642 settings.elevation_locked_count = 0;
14647 settings.should_lock_elevation = FALSE;
14648 settings.elevation_locked_count = 0;
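// The elevation lock above throttles repeated full GCs: while elevation is
// locked, a requested gen2 is reduced to gen1 except every 6th time, when the
// counter resets and the gen2 is allowed through. Sketch (illustration only):
#if 0
int throttle_elevation (int requested_gen, int max_gen, bool lock_elevation,
                        int* locked_count, int period /* 6 above */)
{
    if (!lock_elevation || (requested_gen != max_gen))
        return requested_gen;
    (*locked_count)++;
    if (*locked_count == period)
    {
        *locked_count = 0;
        return max_gen;         // let this one stay a full GC
    }
    return max_gen - 1;         // reduce to an ephemeral GC
}
#endif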
14651 if (provisional_mode_triggered && (n == max_generation))
14653 // There are a few cases where we should not reduce the generation.
14654 if ((initial_gen == max_generation) || (settings.reason == reason_alloc_loh))
14656 // If we are doing a full GC in the provisional mode, we always
14657 // make it blocking because we don't want to get into a situation
14658 // where foreground GCs are asking for a compacting full GC right away
14659 // and not getting it.
14660 dprintf (GTC_LOG, ("full GC induced, not reducing gen"));
14661 *blocking_collection_p = TRUE;
14663 else if (should_expand_in_full_gc || joined_last_gc_before_oom)
14665 dprintf (GTC_LOG, ("need full blocking GCs to expand heap or avoid OOM, not reducing gen"));
14666 assert (*blocking_collection_p);
14670 dprintf (GTC_LOG, ("reducing gen in PM: %d->%d->%d", initial_gen, n, (max_generation - 1)));
14671 n = max_generation - 1;
14675 if (should_expand_in_full_gc)
14677 should_expand_in_full_gc = FALSE;
14680 if (heap_hard_limit)
14682 // If we have already consumed 90% of the limit, we should check to see if we should compact LOH.
14683 // TODO: should unify this with gen2.
14684 dprintf (GTC_LOG, ("committed %Id is %d%% of limit %Id",
14685 current_total_committed, (int)((float)current_total_committed * 100.0 / (float)heap_hard_limit),
14687 if ((current_total_committed * 10) >= (heap_hard_limit * 9))
14689 bool full_compact_gc_p = false;
14691 size_t loh_frag = get_total_gen_fragmentation (max_generation + 1);
14693 // If the LOH frag is >= 1/8 it's worth compacting it
14694 if ((loh_frag * 8) >= heap_hard_limit)
14696 dprintf (GTC_LOG, ("loh frag: %Id > 1/8 of limit %Id", loh_frag, (heap_hard_limit / 8)));
14697 full_compact_gc_p = true;
14701 // If there's not much fragmentation but it looks like it'll be productive to
14702 // collect LOH, do that.
14703 size_t est_loh_reclaim = get_total_gen_estimated_reclaim (max_generation + 1);
14704 full_compact_gc_p = ((est_loh_reclaim * 8) >= heap_hard_limit);
14705 dprintf (GTC_LOG, ("loh est reclaim: %Id, 1/8 of limit %Id", est_loh_reclaim, (heap_hard_limit / 8)));
14708 if (full_compact_gc_p)
14710 n = max_generation;
14711 *blocking_collection_p = TRUE;
14712 settings.loh_compaction = TRUE;
14713 dprintf (GTC_LOG, ("compacting LOH due to hard limit"));
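// Sketch of the hard-limit policy above: once 90% of the limit is committed,
// trigger an LOH-compacting full GC if either the measured LOH fragmentation
// or the estimated reclaimable LOH space reaches 1/8 of the limit. The
// multiplications keep the comparison in integer math (illustration only):
#if 0
#include <cstddef>

bool should_compact_loh_for_hard_limit (size_t committed, size_t hard_limit,
                                        size_t loh_frag, size_t est_loh_reclaim)
{
    if ((committed * 10) < (hard_limit * 9))
        return false;   // still below 90% of the limit
    return ((loh_frag * 8) >= hard_limit) ||
           ((est_loh_reclaim * 8) >= hard_limit);
}
#endif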
14718 if ((n == max_generation) && (*blocking_collection_p == FALSE))
14720 // If we are doing a gen2 we should reset elevation regardless and let the gen2
14721 // GC decide if we should lock again; in the bgc case, by design, we will not retract.
14723 settings.should_lock_elevation = FALSE;
14724 settings.elevation_locked_count = 0;
14725 dprintf (1, ("doing bgc, reset elevation"));
14729 #ifdef BACKGROUND_GC
14730 // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
14731 // generations to be collected.
14733 // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple
14734 // things that need to be fixed in this code block.
14735 if (n_original != max_generation &&
14736 g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
14738 #ifndef FEATURE_REDHAWK
14739 // for the GC stress mix mode, throttle down gen2 collections
14740 if (g_pConfig->IsGCStressMix())
14742 size_t current_gc_count = 0;
14744 #ifdef MULTIPLE_HEAPS
14745 current_gc_count = (size_t)dd_collection_count (g_heaps[0]->dynamic_data_of (0));
14747 current_gc_count = (size_t)dd_collection_count (dynamic_data_of (0));
14748 #endif //MULTIPLE_HEAPS
14749 // in gc stress, only escalate every 10th non-gen2 collection to a gen2...
14750 if ((current_gc_count % 10) == 0)
14752 n = max_generation;
14755 // for traditional GC stress
14757 #endif // !FEATURE_REDHAWK
14758 if (*blocking_collection_p)
14760 // We call StressHeap() a lot for Concurrent GC Stress. However,
14761 // if we cannot do a concurrent collection, there is no need to stress anymore.
14762 // @TODO: Enable stress when the memory pressure goes down again
14763 GCStressPolicy::GlobalDisable();
14767 n = max_generation;
14770 #endif //BACKGROUND_GC
14771 #endif //STRESS_HEAP
14777 size_t get_survived_size (gc_history_per_heap* hist)
14779 size_t surv_size = 0;
14780 gc_generation_data* gen_data;
14782 for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
14784 gen_data = &(hist->gen_data[gen_number]);
14785 surv_size += (gen_data->size_after -
14786 gen_data->free_list_space_after -
14787 gen_data->free_obj_space_after);
14793 size_t gc_heap::get_total_survived_size()
14795 size_t total_surv_size = 0;
14796 #ifdef MULTIPLE_HEAPS
14797 for (int i = 0; i < gc_heap::n_heaps; i++)
14799 gc_heap* hp = gc_heap::g_heaps[i];
14800 gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
14801 total_surv_size += get_survived_size (current_gc_data_per_heap);
14804 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
14805 total_surv_size = get_survived_size (current_gc_data_per_heap);
14806 #endif //MULTIPLE_HEAPS
14807 return total_surv_size;
14810 size_t gc_heap::get_total_allocated_since_last_gc()
14812 size_t total_allocated_size = 0;
14813 #ifdef MULTIPLE_HEAPS
14814 for (int i = 0; i < gc_heap::n_heaps; i++)
14816 gc_heap* hp = gc_heap::g_heaps[i];
14817 total_allocated_size += hp->allocated_since_last_gc;
14818 hp->allocated_since_last_gc = 0;
14821 total_allocated_size = allocated_since_last_gc;
14822 allocated_since_last_gc = 0;
14823 #endif //MULTIPLE_HEAPS
14824 return total_allocated_size;
14827 // Gets what's allocated on both SOH and LOH that hasn't been collected.
14828 size_t gc_heap::get_current_allocated()
14830 dynamic_data* dd = dynamic_data_of (0);
14831 size_t current_alloc = dd_desired_allocation (dd) - dd_new_allocation (dd);
14832 dd = dynamic_data_of (max_generation + 1);
14833 current_alloc += dd_desired_allocation (dd) - dd_new_allocation (dd);
14835 return current_alloc;
14838 size_t gc_heap::get_total_allocated()
14840 size_t total_current_allocated = 0;
14841 #ifdef MULTIPLE_HEAPS
14842 for (int i = 0; i < gc_heap::n_heaps; i++)
14844 gc_heap* hp = gc_heap::g_heaps[i];
14845 total_current_allocated += hp->get_current_allocated();
14848 total_current_allocated = get_current_allocated();
14849 #endif //MULTIPLE_HEAPS
14850 return total_current_allocated;
14853 size_t gc_heap::current_generation_size (int gen_number)
14855 dynamic_data* dd = dynamic_data_of (gen_number);
14856 size_t gen_size = (dd_current_size (dd) + dd_desired_allocation (dd)
14857 - dd_new_allocation (dd));
14863 #pragma warning(push)
14864 #pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
14868 This is called when we are actually doing a GC, or when we are just checking whether
14869 we would do a full blocking GC, in which case check_only_p is TRUE.
14871 The difference between calling this with check_only_p TRUE and FALSE is that when it's TRUE:
14873 settings.reason is ignored
14874 budgets are not checked (since they are checked before this is called)
14875 it doesn't change anything non-local like generation_skip_ratio
14877 int gc_heap::generation_to_condemn (int n_initial,
14878 BOOL* blocking_collection_p,
14879 BOOL* elevation_requested_p,
14882 gc_mechanisms temp_settings = settings;
14883 gen_to_condemn_tuning temp_condemn_reasons;
14884 gc_mechanisms* local_settings = (check_only_p ? &temp_settings : &settings);
14885 gen_to_condemn_tuning* local_condemn_reasons = (check_only_p ? &temp_condemn_reasons : &gen_to_condemn_reasons);
14888 if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh))
14890 assert (n_initial >= 1);
14893 assert (settings.reason != reason_empty);
14896 local_condemn_reasons->init();
14900 if (heap_number == 0)
14902 dprintf (GTC_LOG, ("init: %d(%d)", n_initial, settings.reason));
14906 BOOL low_memory_detected = g_low_memory_status;
14907 uint32_t memory_load = 0;
14908 uint64_t available_physical = 0;
14909 uint64_t available_page_file = 0;
14910 BOOL check_memory = FALSE;
14911 BOOL high_fragmentation = FALSE;
14912 BOOL v_high_memory_load = FALSE;
14913 BOOL high_memory_load = FALSE;
14914 BOOL low_ephemeral_space = FALSE;
14915 BOOL evaluate_elevation = TRUE;
14916 *elevation_requested_p = FALSE;
14917 *blocking_collection_p = FALSE;
14919 BOOL check_max_gen_alloc = TRUE;
14923 #endif //STRESS_HEAP
14927 dd_fragmentation (dynamic_data_of (0)) =
14928 generation_free_list_space (youngest_generation) +
14929 generation_free_obj_space (youngest_generation);
14931 dd_fragmentation (dynamic_data_of (max_generation + 1)) =
14932 generation_free_list_space (large_object_generation) +
14933 generation_free_obj_space (large_object_generation);
14935 //save new_allocation
14936 for (i = 0; i <= max_generation+1; i++)
14938 dynamic_data* dd = dynamic_data_of (i);
14939 dprintf (GTC_LOG, ("h%d: g%d: l: %Id (%Id)",
14941 dd_new_allocation (dd),
14942 dd_desired_allocation (dd)));
14943 dd_gc_new_allocation (dd) = dd_new_allocation (dd);
14946 local_condemn_reasons->set_gen (gen_initial, n);
14949 #ifdef BACKGROUND_GC
14950 if (recursive_gc_sync::background_running_p())
14952 dprintf (GTC_LOG, ("bgc in prog, 1"));
14953 check_max_gen_alloc = FALSE;
14955 #endif //BACKGROUND_GC
14957 if (check_max_gen_alloc)
14959 //figure out if large objects need to be collected.
14960 if (get_new_allocation (max_generation+1) <= 0)
14962 n = max_generation;
14963 local_condemn_reasons->set_gen (gen_alloc_budget, n);
14967 //figure out which generation ran out of allocation
14968 for (i = n+1; i <= (check_max_gen_alloc ? max_generation : (max_generation - 1)); i++)
14970 if (get_new_allocation (i) <= 0)
14981 local_condemn_reasons->set_gen (gen_alloc_budget, n);
14984 dprintf (GTC_LOG, ("h%d: g%d budget", heap_number, ((get_new_allocation (max_generation+1) <= 0) ? 3 : n)));
14988 #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
14989 //time based tuning
14990 // If enough time has elapsed since the last GC and the number of GCs
14991 // is too low (1/10 of the lower generation's), then collect.
14992 // This should also be enabled if we have memory concerns.
14993 int n_time_max = max_generation;
14997 if (recursive_gc_sync::background_running_p())
14999 n_time_max = max_generation - 1;
15003 if ((local_settings->pause_mode == pause_interactive) ||
15004 (local_settings->pause_mode == pause_sustained_low_latency))
15006 dynamic_data* dd0 = dynamic_data_of (0);
15007 size_t now = GetHighPrecisionTimeStamp();
15009 for (i = (temp_gen+1); i <= n_time_max; i++)
15011 dynamic_data* dd = dynamic_data_of (i);
15012 if ((now > dd_time_clock(dd) + dd_time_clock_interval(dd)) &&
15013 (dd_gc_clock (dd0) > (dd_gc_clock (dd) + dd_gc_clock_interval(dd))) &&
15014 ((n < max_generation) || ((dd_current_size (dd) < dd_max_size (dd0)))))
15016 n = min (i, n_time_max);
15017 dprintf (GTC_LOG, ("time %d", n));
15022 local_condemn_reasons->set_gen (gen_time_tuning, n);
15028 dprintf (GTC_LOG, ("Condemning %d based on time tuning and fragmentation", n));
15030 #endif //BACKGROUND_GC && !MULTIPLE_HEAPS
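// The time-based tuning above condemns a higher generation only when both
// enough wall-clock time and enough gen0 GCs have passed since it was last
// collected, so rarely-touched generations still get collected eventually.
// Sketch of the per-generation test (illustration only, hypothetical names):
#if 0
#include <cstdint>
#include <cstddef>

bool gen_collection_due (uint64_t now, uint64_t last_collect_time, uint64_t time_interval,
                         size_t gen0_gc_count, size_t last_collect_gc_count,
                         size_t gc_count_interval)
{
    return (now > (last_collect_time + time_interval)) &&
           (gen0_gc_count > (last_collect_gc_count + gc_count_interval));
}
#endif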
15032 if (n < (max_generation - 1))
15034 if (dt_low_card_table_efficiency_p (tuning_deciding_condemned_gen))
15036 n = max (n, max_generation - 1);
15037 local_settings->promotion = TRUE;
15038 dprintf (GTC_LOG, ("h%d: skip %d, c %d",
15039 heap_number, generation_skip_ratio, n));
15040 local_condemn_reasons->set_condition (gen_low_card_p);
15046 generation_skip_ratio = 100;
15049 if (dt_low_ephemeral_space_p (check_only_p ?
15050 tuning_deciding_full_gc :
15051 tuning_deciding_condemned_gen))
15053 low_ephemeral_space = TRUE;
15055 n = max (n, max_generation - 1);
15056 local_condemn_reasons->set_condition (gen_low_ephemeral_p);
15057 dprintf (GTC_LOG, ("h%d: low eph", heap_number));
15059 if (!provisional_mode_triggered)
15061 #ifdef BACKGROUND_GC
15062 if (!gc_can_use_concurrent || (generation_free_list_space (generation_of (max_generation)) == 0))
15063 #endif //BACKGROUND_GC
15065 //It is better to defragment first if we are running out of space for
15066 //the ephemeral generation but have enough fragmentation to make up for it
15067 //in the non-ephemeral generation. Essentially we are trading a gen2 GC for
15068 //not having to expand the heap in ephemeral collections.
15069 if (dt_high_frag_p (tuning_deciding_condemned_gen,
15070 max_generation - 1,
15073 high_fragmentation = TRUE;
15074 local_condemn_reasons->set_condition (gen_max_high_frag_e_p);
15075 dprintf (GTC_LOG, ("heap%d: gen1 frag", heap_number));
15081 //figure out which ephemeral generation is too fragmented
15083 for (i = n+1; i < max_generation; i++)
15085 if (dt_high_frag_p (tuning_deciding_condemned_gen, i))
15087 dprintf (GTC_LOG, ("h%d g%d too frag", heap_number, i));
15094 if (low_ephemeral_space)
15097 local_settings->promotion = TRUE;
15102 local_condemn_reasons->set_condition (gen_eph_high_frag_p);
15107 if (settings.pause_mode == pause_low_latency)
15109 if (!is_induced (settings.reason))
15111 n = min (n, max_generation - 1);
15112 dprintf (GTC_LOG, ("low latency mode is enabled, condemning %d", n));
15113 evaluate_elevation = FALSE;
15119 // It's hard to catch the point at which the memory load gets so high that
15120 // we'd get an induced GC from the finalizer thread, so we check the memory load
15121 // on every gen0 GC.
15122 check_memory = (check_only_p ?
15124 ((n >= 1) || low_memory_detected));
15128 //find out if we are short on memory
15129 get_memory_info (&memory_load, &available_physical, &available_page_file);
15130 if (heap_number == 0)
15132 dprintf (GTC_LOG, ("ml: %d", memory_load));
15135 // Need to get it early enough for all heaps to use.
15136 entry_available_physical_mem = available_physical;
15137 local_settings->entry_memory_load = memory_load;
15139 // @TODO: Force compaction more often under GCSTRESS
15140 if (memory_load >= high_memory_load_th || low_memory_detected)
15142 #ifdef SIMPLE_DPRINTF
15143 // stress log can't handle any parameter that's bigger than a void*.
15144 if (heap_number == 0)
15146 dprintf (GTC_LOG, ("tp: %I64d, ap: %I64d", total_physical_mem, available_physical));
15148 #endif //SIMPLE_DPRINTF
15150 high_memory_load = TRUE;
15152 if (memory_load >= v_high_memory_load_th || low_memory_detected)
15154 // TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since
15155 // gen1/gen0 may take a lot more memory than gen2.
15156 if (!high_fragmentation)
15158 high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation);
15160 v_high_memory_load = TRUE;
15164 if (!high_fragmentation)
15166 high_fragmentation = dt_estimate_high_frag_p (tuning_deciding_condemned_gen, max_generation, available_physical);
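// Two different estimators are used above: under very high memory load we
// estimate whether enough space could be reclaimed from gen2
// (dt_estimate_reclaim_space_p); under merely high load we estimate gen2's
// fragmentation against the available physical memory
// (dt_estimate_high_frag_p).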
15170 if (high_fragmentation)
15172 if (high_memory_load)
15174 local_condemn_reasons->set_condition (gen_max_high_frag_m_p);
15176 else if (v_high_memory_load)
15178 local_condemn_reasons->set_condition (gen_max_high_frag_vm_p);
15184 dprintf (GTC_LOG, ("h%d: le: %d, hm: %d, vm: %d, f: %d",
15185 heap_number, low_ephemeral_space, high_memory_load, v_high_memory_load,
15186 high_fragmentation));
15188 if (should_expand_in_full_gc)
15190 dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number));
15191 *blocking_collection_p = TRUE;
15192 evaluate_elevation = FALSE;
15193 n = max_generation;
15194 local_condemn_reasons->set_condition (gen_expand_fullgc_p);
15197 if (last_gc_before_oom)
15199 dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number));
15200 n = max_generation;
15201 *blocking_collection_p = TRUE;
15203 if ((local_settings->reason == reason_oos_loh) ||
15204 (local_settings->reason == reason_alloc_loh))
15206 evaluate_elevation = FALSE;
15209 local_condemn_reasons->set_condition (gen_before_oom);
15214 if (is_induced_blocking (settings.reason) &&
15215 n_initial == max_generation
15216 IN_STRESS_HEAP( && !settings.stress_induced ))
15218 if (heap_number == 0)
15220 dprintf (GTC_LOG, ("induced - BLOCK"));
15223 *blocking_collection_p = TRUE;
15224 local_condemn_reasons->set_condition (gen_induced_fullgc_p);
15225 evaluate_elevation = FALSE;
15228 if (settings.reason == reason_induced_noforce)
15230 local_condemn_reasons->set_condition (gen_induced_noforce_p);
15231 evaluate_elevation = FALSE;
15235 if (!provisional_mode_triggered && evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load))
15237 *elevation_requested_p = TRUE;
15239 // if we are in high memory load and have consumed 10% of the gen2 budget, do a gen2 now.
15240 if (high_memory_load || v_high_memory_load)
15242 dynamic_data* dd_max = dynamic_data_of (max_generation);
15243 if (((float)dd_new_allocation (dd_max) / (float)dd_desired_allocation (dd_max)) < 0.9)
15245 dprintf (GTC_LOG, ("%Id left in gen2 alloc (%Id)",
15246 dd_new_allocation (dd_max), dd_desired_allocation (dd_max)));
15247 n = max_generation;
15248 local_condemn_reasons->set_condition (gen_almost_max_alloc);
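// Note that dd_new_allocation is the *remaining* gen2 budget, so a
// remaining/desired ratio below 0.9 means more than 10% of the budget has
// already been consumed.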
15252 if (n <= max_generation)
15255 if (high_fragmentation)
15257 //elevate to max_generation
15258 n = max_generation;
15259 dprintf (GTC_LOG, ("h%d: f full", heap_number));
15261 #ifdef BACKGROUND_GC
15262 if (high_memory_load || v_high_memory_load)
15264 // For background GC we want to do blocking collections more eagerly because we don't
15265 // want to get into the situation where the memory load becomes high while we are in
15266 // a background GC and we'd have to wait for the background GC to finish to start
15267 // a blocking collection (right now the implementation doesn't handle converting
15268 // a background GC to a blocking collection midway).
15269 dprintf (GTC_LOG, ("h%d: bgc - BLOCK", heap_number));
15270 *blocking_collection_p = TRUE;
15273 if (v_high_memory_load)
15275 dprintf (GTC_LOG, ("h%d: - BLOCK", heap_number));
15276 *blocking_collection_p = TRUE;
15278 #endif //BACKGROUND_GC
15282 n = max (n, max_generation - 1);
15283 dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n));
15290 if (!provisional_mode_triggered && (n == (max_generation - 1)) && (n_alloc < (max_generation -1)))
15292 dprintf (GTC_LOG, ("h%d: budget %d, check 2",
15293 heap_number, n_alloc));
15294 if (get_new_allocation (max_generation) <= 0)
15296 dprintf (GTC_LOG, ("h%d: budget alloc", heap_number));
15297 n = max_generation;
15298 local_condemn_reasons->set_condition (gen_max_gen1);
15302 //figure out if max_generation is too fragmented -> blocking collection
15303 if (!provisional_mode_triggered && (n == max_generation))
15305 if (dt_high_frag_p (tuning_deciding_condemned_gen, n))
15307 dprintf (GTC_LOG, ("h%d: g%d too frag", heap_number, n));
15308 local_condemn_reasons->set_condition (gen_max_high_frag_p);
15309 if (local_settings->pause_mode != pause_sustained_low_latency)
15311 *blocking_collection_p = TRUE;
15316 #ifdef BACKGROUND_GC
15317 if (n == max_generation)
15319 if (heap_number == 0)
15321 BOOL bgc_heap_too_small = TRUE;
15322 size_t gen2size = 0;
15323 size_t gen3size = 0;
15324 #ifdef MULTIPLE_HEAPS
15325 for (int i = 0; i < n_heaps; i++)
15327 if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) ||
15328 ((g_heaps[i]->current_generation_size (max_generation + 1)) > bgc_min_per_heap))
15330 bgc_heap_too_small = FALSE;
15334 #else //MULTIPLE_HEAPS
15335 if ((current_generation_size (max_generation) > bgc_min_per_heap) ||
15336 (current_generation_size (max_generation + 1) > bgc_min_per_heap))
15338 bgc_heap_too_small = FALSE;
15340 #endif //MULTIPLE_HEAPS
15342 if (bgc_heap_too_small)
15344 dprintf (GTC_LOG, ("gen2 and gen3 too small"));
15347 // do not turn stress-induced collections into blocking GCs
15348 if (!settings.stress_induced)
15349 #endif //STRESS_HEAP
15351 *blocking_collection_p = TRUE;
15354 local_condemn_reasons->set_condition (gen_gen2_too_small);
15358 #endif //BACKGROUND_GC
15364 #ifdef BACKGROUND_GC
15365 // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
15366 // generations to be collected.
15368 if (orig_gen != max_generation &&
15369 g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
15371 *elevation_requested_p = FALSE;
15373 #endif //BACKGROUND_GC
15374 #endif //STRESS_HEAP
15378 fgm_result.available_pagefile_mb = (size_t)(available_page_file / (1024 * 1024));
15381 local_condemn_reasons->set_gen (gen_final_per_heap, n);
15382 get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons);
15385 local_condemn_reasons->print (heap_number);
15388 if ((local_settings->reason == reason_oos_soh) ||
15389 (local_settings->reason == reason_oos_loh))
15399 #pragma warning(pop)
15403 size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps)
15405 // the higher the memory load, the lower the threshold at which we'd want to collect.
15406 size_t min_mem_based_on_available =
15407 (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;
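// A rough illustration, assuming the default high_memory_load_th of 90 (the
// threshold is configurable): an entry memory load of 95 yields
// (500 - 5*40)MB = 300MB, split evenly across the heaps.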
15409 size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10);
15410 uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps;
15412 #ifdef SIMPLE_DPRINTF
15413 dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d",
15414 min_mem_based_on_available, ten_percent_size, three_percent_mem));
15415 #endif //SIMPLE_DPRINTF
15416 return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem)));
15420 uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps)
15422 return min (available_mem, (256*1024*1024)) / num_heaps;
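// i.e., the per-heap threshold is the smaller of the available memory and
// 256MB, divided evenly across the heaps.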
15426 CORINFO_EXCEPTION_GC = 0xE0004743 // 'GC'
15430 #ifdef BACKGROUND_GC
15431 void gc_heap::init_background_gc ()
15433 //reset the allocation so the foreground GC can allocate into the older (max_generation) generation
15434 generation* gen = generation_of (max_generation);
15435 generation_allocation_pointer (gen)= 0;
15436 generation_allocation_limit (gen) = 0;
15437 generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
15439 PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
15441 //reset the plan allocation for each segment
15442 for (heap_segment* seg = generation_allocation_segment (gen); seg != ephemeral_heap_segment;
15443 seg = heap_segment_next_rw (seg))
15445 heap_segment_plan_allocated (seg) = heap_segment_allocated (seg);
15448 if (heap_number == 0)
15450 dprintf (2, ("heap%d: bgc lowest: %Ix, highest: %Ix",
15452 background_saved_lowest_address,
15453 background_saved_highest_address));
15456 gc_lh_block_event.Reset();
15459 #endif //BACKGROUND_GC
15462 void fire_drain_mark_list_event (size_t mark_list_objects)
15464 FIRE_EVENT(BGCDrainMark, mark_list_objects);
15468 void fire_revisit_event (size_t dirtied_pages,
15469 size_t marked_objects,
15470 BOOL large_objects_p)
15472 FIRE_EVENT(BGCRevisit, dirtied_pages, marked_objects, large_objects_p);
15476 void fire_overflow_event (uint8_t* overflow_min,
15477 uint8_t* overflow_max,
15478 size_t marked_objects,
15479 int large_objects_p)
15481 FIRE_EVENT(BGCOverflow, (uint64_t)overflow_min, (uint64_t)overflow_max, marked_objects, large_objects_p);
15484 void gc_heap::concurrent_print_time_delta (const char* msg)
15487 size_t current_time = GetHighPrecisionTimeStamp();
15488 size_t elapsed_time = current_time - time_bgc_last;
15489 time_bgc_last = current_time;
15491 dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
15493 UNREFERENCED_PARAMETER(msg);
15497 void gc_heap::free_list_info (int gen_num, const char* msg)
15499 UNREFERENCED_PARAMETER(gen_num);
15500 #if defined (BACKGROUND_GC) && defined (TRACE_GC)
15501 dprintf (3, ("h%d: %s", heap_number, msg));
15502 for (int i = 0; i <= (max_generation + 1); i++)
15504 generation* gen = generation_of (i);
15505 if ((generation_allocation_size (gen) == 0) &&
15506 (generation_free_list_space (gen) == 0) &&
15507 (generation_free_obj_space (gen) == 0))
15509 // don't print if everything is 0.
15513 dprintf (3, ("h%d: g%d: a-%Id, fl-%Id, fo-%Id",
15515 generation_allocation_size (gen),
15516 generation_free_list_space (gen),
15517 generation_free_obj_space (gen)));
15521 UNREFERENCED_PARAMETER(msg);
15522 #endif // BACKGROUND_GC && TRACE_GC
15525 void gc_heap::update_collection_counts_for_no_gc()
15527 assert (settings.pause_mode == pause_no_gc);
15529 settings.condemned_generation = max_generation;
15530 #ifdef MULTIPLE_HEAPS
15531 for (int i = 0; i < n_heaps; i++)
15532 g_heaps[i]->update_collection_counts();
15533 #else //MULTIPLE_HEAPS
15534 update_collection_counts();
15535 #endif //MULTIPLE_HEAPS
15537 full_gc_counts[gc_type_blocking]++;
15540 BOOL gc_heap::should_proceed_with_gc()
15542 if (gc_heap::settings.pause_mode == pause_no_gc)
15544 if (current_no_gc_region_info.started)
15546 // The no_gc mode was already in progress yet we triggered another GC;
15547 // this effectively exits the no_gc mode.
15548 restore_data_for_no_gc();
15551 return should_proceed_for_no_gc();
15557 //internal part of GC used by both the serial and the concurrent versions
15558 void gc_heap::gc1()
15560 #ifdef BACKGROUND_GC
15561 assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15562 #endif //BACKGROUND_GC
15565 mark_time = plan_time = reloc_time = compact_time = sweep_time = 0;
15568 verify_soh_segment_list();
15570 int n = settings.condemned_generation;
15572 if (settings.reason == reason_pm_full_gc)
15574 assert (n == max_generation);
15577 gen_to_condemn_tuning* local_condemn_reasons = &(get_gc_data_per_heap()->gen_to_condemn_reasons);
15578 local_condemn_reasons->init();
15579 local_condemn_reasons->set_gen (gen_initial, n);
15580 local_condemn_reasons->set_gen (gen_final_per_heap, n);
15583 update_collection_counts ();
15585 #ifdef BACKGROUND_GC
15586 bgc_alloc_lock->check();
15587 #endif //BACKGROUND_GC
15589 free_list_info (max_generation, "beginning");
15591 vm_heap->GcCondemnedGeneration = settings.condemned_generation;
15593 assert (g_gc_card_table == card_table);
15595 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
15596 assert (g_gc_card_bundle_table == card_bundle_table);
15600 if (n == max_generation)
15602 gc_low = lowest_address;
15603 gc_high = highest_address;
15607 gc_low = generation_allocation_start (generation_of (n));
15608 gc_high = heap_segment_reserved (ephemeral_heap_segment);
15610 #ifdef BACKGROUND_GC
15611 if (settings.concurrent)
15614 time_bgc_last = GetHighPrecisionTimeStamp();
15617 FIRE_EVENT(BGCBegin);
15619 concurrent_print_time_delta ("BGC");
15621 //#ifdef WRITE_WATCH
15622 //reset_write_watch (FALSE);
15623 //#endif //WRITE_WATCH
15625 concurrent_print_time_delta ("RW");
15626 background_mark_phase();
15627 free_list_info (max_generation, "after mark phase");
15629 background_sweep();
15630 free_list_info (max_generation, "after sweep phase");
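// For a BGC, both marking and sweeping run concurrently; the non-concurrent
// (blocking) path below goes through the stop-the-world mark/plan pipeline
// instead.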
15633 #endif //BACKGROUND_GC
15635 mark_phase (n, FALSE);
15637 GCScan::GcRuntimeStructuresValid (FALSE);
15639 GCScan::GcRuntimeStructuresValid (TRUE);
15643 size_t end_gc_time = GetHighPrecisionTimeStamp();
15644 // printf ("generation: %d, elapsed time: %Id\n", n, end_gc_time - dd_time_clock (dynamic_data_of (0)));
15646 //adjust the allocation size from the pinned quantities.
15647 for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++)
15649 generation* gn = generation_of (gen_number);
15650 if (settings.compaction)
15652 generation_pinned_allocated (gn) += generation_pinned_allocation_compact_size (gn);
15653 generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_compact_size (gn);
15657 generation_pinned_allocated (gn) += generation_pinned_allocation_sweep_size (gn);
15658 generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_sweep_size (gn);
15660 generation_pinned_allocation_sweep_size (gn) = 0;
15661 generation_pinned_allocation_compact_size (gn) = 0;
15664 #ifdef BACKGROUND_GC
15665 if (settings.concurrent)
15667 dynamic_data* dd = dynamic_data_of (n);
15668 dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
15670 free_list_info (max_generation, "after computing new dynamic data");
15672 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
15674 for (int gen_number = 0; gen_number < max_generation; gen_number++)
15676 dprintf (2, ("end of BGC: gen%d new_alloc: %Id",
15677 gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
15678 current_gc_data_per_heap->gen_data[gen_number].size_after = generation_size (gen_number);
15679 current_gc_data_per_heap->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
15680 current_gc_data_per_heap->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
15684 #endif //BACKGROUND_GC
15686 free_list_info (max_generation, "end");
15687 for (int gen_number = 0; gen_number <= n; gen_number++)
15689 dynamic_data* dd = dynamic_data_of (gen_number);
15690 dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
15691 compute_new_dynamic_data (gen_number);
15694 if (n != max_generation)
15696 int gen_num_for_data = ((n < (max_generation - 1)) ? (n + 1) : (max_generation + 1));
15697 for (int gen_number = (n + 1); gen_number <= gen_num_for_data; gen_number++)
15699 get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number);
15700 get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
15701 get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
15705 get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (uint32_t)(generation_allocator_efficiency (generation_of (max_generation)) * 100);
15707 free_list_info (max_generation, "after computing new dynamic data");
15709 if (heap_number == 0)
15711 dprintf (GTC_LOG, ("GC#%d(gen%d) took %Idms",
15712 dd_collection_count (dynamic_data_of (0)),
15713 settings.condemned_generation,
15714 dd_gc_elapsed_time (dynamic_data_of (0))));
15717 for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
15719 dprintf (2, ("end of FGC/NGC: gen%d new_alloc: %Id",
15720 gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
15724 if (n < max_generation)
15726 compute_promoted_allocation (1 + n);
15728 dynamic_data* dd = dynamic_data_of (1 + n);
15729 size_t new_fragmentation = generation_free_list_space (generation_of (1 + n)) +
15730 generation_free_obj_space (generation_of (1 + n));
15732 #ifdef BACKGROUND_GC
15733 if (current_c_gc_state != c_gc_state_planning)
15734 #endif //BACKGROUND_GC
15736 if (settings.promotion)
15738 dd_fragmentation (dd) = new_fragmentation;
15742 //assert (dd_fragmentation (dd) == new_fragmentation);
15747 #ifdef BACKGROUND_GC
15748 if (!settings.concurrent)
15749 #endif //BACKGROUND_GC
15751 #ifndef FEATURE_REDHAWK
15752 // GCToEEInterface::IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
15753 assert(GCToEEInterface::IsGCThread());
15754 #endif // FEATURE_REDHAWK
15755 adjust_ephemeral_limits();
15758 #ifdef BACKGROUND_GC
15759 assert (ephemeral_low == generation_allocation_start (generation_of ( max_generation -1)));
15760 assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment));
15761 #endif //BACKGROUND_GC
15763 if (fgn_maxgen_percent)
15765 if (settings.condemned_generation == (max_generation - 1))
15767 check_for_full_gc (max_generation - 1, 0);
15769 else if (settings.condemned_generation == max_generation)
15771 if (full_gc_approach_event_set
15772 #ifdef MULTIPLE_HEAPS
15773 && (heap_number == 0)
15774 #endif //MULTIPLE_HEAPS
15777 dprintf (2, ("FGN-GC: setting gen2 end event"));
15779 full_gc_approach_event.Reset();
15780 #ifdef BACKGROUND_GC
15781 // By definition WaitForFullGCComplete only succeeds if it's a full, *blocking* GC; otherwise we need to return N/A
15782 fgn_last_gc_was_concurrent = settings.concurrent ? TRUE : FALSE;
15783 #endif //BACKGROUND_GC
15784 full_gc_end_event.Set();
15785 full_gc_approach_event_set = false;
15790 #ifdef BACKGROUND_GC
15791 if (!settings.concurrent)
15792 #endif //BACKGROUND_GC
15794 //decide on the next allocation quantum
15795 if (alloc_contexts_used >= 1)
15797 allocation_quantum = Align (min ((size_t)CLR_SIZE,
15798 (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))),
15799 get_alignment_constant(FALSE));
15800 dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum));
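// i.e., the quantum is half the remaining gen0 budget per allocation context,
// clamped between 1KB and CLR_SIZE, then aligned.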
15804 descr_generations (FALSE);
15806 verify_soh_segment_list();
15808 #ifdef BACKGROUND_GC
15809 add_to_history_per_heap();
15810 if (heap_number == 0)
15814 #endif // BACKGROUND_GC
15817 if (GCStatistics::Enabled() && heap_number == 0)
15818 g_GCStatistics.AddGCStats(settings,
15819 dd_gc_elapsed_time(dynamic_data_of(settings.condemned_generation)));
15823 fprintf (stdout, "%d,%d,%d,%d,%d,%d\n",
15824 n, mark_time, plan_time, reloc_time, compact_time, sweep_time);
15827 #ifdef BACKGROUND_GC
15828 assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15829 #endif //BACKGROUND_GC
15831 #if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
15834 // Note that right now g_pConfig->GetHeapVerifyLevel always returns the same
15835 // value. If we ever allow randomly adjusting this as the process runs,
15836 // we cannot call it this way as joins need to match - we must have the same
15837 // value for all heaps like we do with bgc_heap_walk_for_etw_p.
15838 || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
15840 #if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)
15841 || (bgc_heap_walk_for_etw_p && settings.concurrent)
15845 #ifdef BACKGROUND_GC
15846 bool cooperative_mode = true;
15848 if (settings.concurrent)
15850 cooperative_mode = enable_preemptive ();
15852 #ifdef MULTIPLE_HEAPS
15853 bgc_t_join.join(this, gc_join_suspend_ee_verify);
15854 if (bgc_t_join.joined())
15856 bgc_threads_sync_event.Reset();
15858 dprintf(2, ("Joining BGC threads to suspend EE for verify heap"));
15859 bgc_t_join.restart();
15861 if (heap_number == 0)
15864 bgc_threads_sync_event.Set();
15868 bgc_threads_sync_event.Wait(INFINITE, FALSE);
15869 dprintf (2, ("bgc_threads_sync_event is signalled"));
15873 #endif //MULTIPLE_HEAPS
15875 //fix the allocation area so verify_heap can proceed.
15876 fix_allocation_contexts (FALSE);
15878 #endif //BACKGROUND_GC
15880 #ifdef BACKGROUND_GC
15881 assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
15882 #ifdef FEATURE_EVENT_TRACE
15883 if (bgc_heap_walk_for_etw_p && settings.concurrent)
15885 GCToEEInterface::DiagWalkBGCSurvivors(__this);
15887 #ifdef MULTIPLE_HEAPS
15888 bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
15889 if (bgc_t_join.joined())
15891 bgc_t_join.restart();
15893 #endif // MULTIPLE_HEAPS
15895 #endif // FEATURE_EVENT_TRACE
15896 #endif //BACKGROUND_GC
15899 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
15900 verify_heap (FALSE);
15901 #endif // VERIFY_HEAP
15903 #ifdef BACKGROUND_GC
15904 if (settings.concurrent)
15906 repair_allocation_contexts (TRUE);
15908 #ifdef MULTIPLE_HEAPS
15909 bgc_t_join.join(this, gc_join_restart_ee_verify);
15910 if (bgc_t_join.joined())
15912 bgc_threads_sync_event.Reset();
15914 dprintf(2, ("Joining BGC threads to restart EE after verify heap"));
15915 bgc_t_join.restart();
15917 if (heap_number == 0)
15920 bgc_threads_sync_event.Set();
15924 bgc_threads_sync_event.Wait(INFINITE, FALSE);
15925 dprintf (2, ("bgc_threads_sync_event is signalled"));
15929 #endif //MULTIPLE_HEAPS
15931 disable_preemptive (cooperative_mode);
15933 #endif //BACKGROUND_GC
15935 #endif // defined(VERIFY_HEAP) || (defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
15937 #ifdef MULTIPLE_HEAPS
15938 if (!settings.concurrent)
15940 gc_t_join.join(this, gc_join_done);
15941 if (gc_t_join.joined ())
15943 gc_heap::internal_gc_done = false;
15945 //equalize the new desired size of the generations
15946 int limit = settings.condemned_generation;
15947 if (limit == max_generation)
15949 limit = max_generation+1;
15951 for (int gen = 0; gen <= limit; gen++)
15953 size_t total_desired = 0;
15955 for (int i = 0; i < gc_heap::n_heaps; i++)
15957 gc_heap* hp = gc_heap::g_heaps[i];
15958 dynamic_data* dd = hp->dynamic_data_of (gen);
15959 size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
15960 if (temp_total_desired < total_desired)
15963 total_desired = (size_t)MAX_PTR;
15966 total_desired = temp_total_desired;
15969 size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps,
15970 get_alignment_constant ((gen != (max_generation+1))));
15974 #if 1 //subsumed by the linear allocation model
15975 // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
15976 // apply some smoothing.
15977 static size_t smoothed_desired_per_heap = 0;
15978 size_t smoothing = 3; // exponential smoothing factor
15979 if (smoothing > VolatileLoad(&settings.gc_index))
15980 smoothing = VolatileLoad(&settings.gc_index);
15981 smoothed_desired_per_heap = desired_per_heap / smoothing + ((smoothed_desired_per_heap / smoothing) * (smoothing-1));
15982 dprintf (1, ("sn = %Id n = %Id", smoothed_desired_per_heap, desired_per_heap));
15983 desired_per_heap = Align(smoothed_desired_per_heap, get_alignment_constant (true));
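// The smoothing above is an exponential moving average with factor
// 1/smoothing, i.e. once gc_index >= 3:
//   smoothed = desired/3 + smoothed_prev * (2/3)
// so a one-off survivorship spike only moves the budget by a third of its size.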
15986 if (!heap_hard_limit)
15988 // if desired_per_heap is close to min_gc_size, trim it
15989 // down to min_gc_size to stay in the cache
15990 gc_heap* hp = gc_heap::g_heaps[0];
15991 dynamic_data* dd = hp->dynamic_data_of (gen);
15992 size_t min_gc_size = dd_min_size(dd);
15993 // if the min GC size is larger than the true on-die cache size, then don't bother
15994 // limiting the desired size
15995 if ((min_gc_size <= GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)) &&
15996 desired_per_heap <= 2*min_gc_size)
15998 desired_per_heap = min_gc_size;
16002 desired_per_heap = joined_youngest_desired (desired_per_heap);
16003 dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap));
16005 gc_data_global.final_youngest_desired = desired_per_heap;
16007 #if 1 //subsumed by the linear allocation model
16008 if (gen == (max_generation + 1))
16010 // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
16011 // apply some smoothing.
16012 static size_t smoothed_desired_per_heap_loh = 0;
16013 size_t smoothing = 3; // exponential smoothing factor
16014 size_t loh_count = dd_collection_count (dynamic_data_of (max_generation));
16015 if (smoothing > loh_count)
16016 smoothing = loh_count;
16017 smoothed_desired_per_heap_loh = desired_per_heap / smoothing + ((smoothed_desired_per_heap_loh / smoothing) * (smoothing-1));
16018 dprintf (2, ("smoothed_desired_per_heap_loh = %Id desired_per_heap = %Id", smoothed_desired_per_heap_loh, desired_per_heap));
16019 desired_per_heap = Align(smoothed_desired_per_heap_loh, get_alignment_constant (false));
16022 for (int i = 0; i < gc_heap::n_heaps; i++)
16024 gc_heap* hp = gc_heap::g_heaps[i];
16025 dynamic_data* dd = hp->dynamic_data_of (gen);
16026 dd_desired_allocation (dd) = desired_per_heap;
16027 dd_gc_new_allocation (dd) = desired_per_heap;
16028 dd_new_allocation (dd) = desired_per_heap;
16032 hp->fgn_last_alloc = desired_per_heap;
16037 #ifdef FEATURE_LOH_COMPACTION
16038 BOOL all_heaps_compacted_p = TRUE;
16039 #endif //FEATURE_LOH_COMPACTION
16040 for (int i = 0; i < gc_heap::n_heaps; i++)
16042 gc_heap* hp = gc_heap::g_heaps[i];
16043 hp->decommit_ephemeral_segment_pages();
16044 hp->rearrange_large_heap_segments();
16045 #ifdef FEATURE_LOH_COMPACTION
16046 all_heaps_compacted_p &= hp->loh_compacted_p;
16047 #endif //FEATURE_LOH_COMPACTION
16050 #ifdef FEATURE_LOH_COMPACTION
16051 check_loh_compact_mode (all_heaps_compacted_p);
16052 #endif //FEATURE_LOH_COMPACTION
16055 pm_full_gc_init_or_clear();
16057 gc_t_join.restart();
16059 alloc_context_count = 0;
16060 heap_select::mark_heap (heap_number);
16064 gc_data_global.final_youngest_desired =
16065 dd_desired_allocation (dynamic_data_of (0));
16067 check_loh_compact_mode (loh_compacted_p);
16069 decommit_ephemeral_segment_pages();
16072 if (!(settings.concurrent))
16074 rearrange_large_heap_segments();
16078 pm_full_gc_init_or_clear();
16080 #ifdef BACKGROUND_GC
16081 recover_bgc_settings();
16082 #endif //BACKGROUND_GC
16083 #endif //MULTIPLE_HEAPS
16086 void gc_heap::save_data_for_no_gc()
16088 current_no_gc_region_info.saved_pause_mode = settings.pause_mode;
16089 #ifdef MULTIPLE_HEAPS
16090 // This is to affect heap balancing.
16091 for (int i = 0; i < n_heaps; i++)
16093 current_no_gc_region_info.saved_gen0_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (0));
16094 dd_min_size (g_heaps[i]->dynamic_data_of (0)) = min_balance_threshold;
16095 current_no_gc_region_info.saved_gen3_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1));
16096 dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = 0;
16098 #endif //MULTIPLE_HEAPS
16101 void gc_heap::restore_data_for_no_gc()
16103 gc_heap::settings.pause_mode = current_no_gc_region_info.saved_pause_mode;
16104 #ifdef MULTIPLE_HEAPS
16105 for (int i = 0; i < n_heaps; i++)
16107 dd_min_size (g_heaps[i]->dynamic_data_of (0)) = current_no_gc_region_info.saved_gen0_min_size;
16108 dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = current_no_gc_region_info.saved_gen3_min_size;
16110 #endif //MULTIPLE_HEAPS
16113 start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size,
16114 BOOL loh_size_known,
16116 BOOL disallow_full_blocking)
16118 if (current_no_gc_region_info.started)
16120 return start_no_gc_in_progress;
16123 start_no_gc_region_status status = start_no_gc_success;
16125 save_data_for_no_gc();
16126 settings.pause_mode = pause_no_gc;
16127 current_no_gc_region_info.start_status = start_no_gc_success;
16129 uint64_t allocation_no_gc_loh = 0;
16130 uint64_t allocation_no_gc_soh = 0;
16131 assert(total_size != 0);
16132 if (loh_size_known)
16134 assert(loh_size != 0);
16135 assert(loh_size <= total_size);
16136 allocation_no_gc_loh = loh_size;
16137 allocation_no_gc_soh = total_size - loh_size;
16141 allocation_no_gc_soh = total_size;
16142 allocation_no_gc_loh = total_size;
16145 int soh_align_const = get_alignment_constant (TRUE);
16146 size_t max_soh_allocated = soh_segment_size - segment_info_size - eph_gen_starts_size;
16147 size_t size_per_heap = 0;
16148 const double scale_factor = 1.05;
16151 #ifdef MULTIPLE_HEAPS
16152 num_heaps = n_heaps;
16153 #endif // MULTIPLE_HEAPS
16155 uint64_t total_allowed_soh_allocation = max_soh_allocated * num_heaps;
16157 // In theory, the upper limit here is the physical memory of the machine, not
16158 // SIZE_T_MAX. This is not true today because total_physical_mem can be
16159 // larger than SIZE_T_MAX if running in wow64 on a machine with more than
16160 // 4GB of RAM. Once Local GC code divergence is resolved and code is flowing
16161 // more freely between branches, it would be good to clean this up to use
16162 // total_physical_mem instead of SIZE_T_MAX.
16163 assert(total_allowed_soh_allocation <= SIZE_T_MAX);
16164 uint64_t total_allowed_loh_allocation = SIZE_T_MAX;
16165 uint64_t total_allowed_soh_alloc_scaled = allocation_no_gc_soh > 0 ? static_cast<uint64_t>(total_allowed_soh_allocation / scale_factor) : 0;
16166 uint64_t total_allowed_loh_alloc_scaled = allocation_no_gc_loh > 0 ? static_cast<uint64_t>(total_allowed_loh_allocation / scale_factor) : 0;
16168 if (allocation_no_gc_soh > total_allowed_soh_alloc_scaled ||
16169 allocation_no_gc_loh > total_allowed_loh_alloc_scaled)
16171 status = start_no_gc_too_large;
16175 if (allocation_no_gc_soh > 0)
16177 allocation_no_gc_soh = static_cast<uint64_t>(allocation_no_gc_soh * scale_factor);
16178 allocation_no_gc_soh = min (allocation_no_gc_soh, total_allowed_soh_alloc_scaled);
16181 if (allocation_no_gc_loh > 0)
16183 allocation_no_gc_loh = static_cast<uint64_t>(allocation_no_gc_loh * scale_factor);
16184 allocation_no_gc_loh = min (allocation_no_gc_loh, total_allowed_loh_alloc_scaled);
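// The 5% scale_factor is applied in both directions: the requested sizes are
// inflated by 5% for headroom while the allowed totals are deflated by the
// same factor for the check, so a request can reserve at most ~95% of the
// theoretical maximum.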
16187 if (disallow_full_blocking)
16188 current_no_gc_region_info.minimal_gc_p = TRUE;
16190 if (allocation_no_gc_soh != 0)
16192 current_no_gc_region_info.soh_allocation_size = static_cast<size_t>(allocation_no_gc_soh);
16193 size_per_heap = current_no_gc_region_info.soh_allocation_size;
16194 #ifdef MULTIPLE_HEAPS
16195 size_per_heap /= n_heaps;
16196 for (int i = 0; i < n_heaps; i++)
16198 // due to heap balancing we need to allow some room before we even look to balance to another heap.
16199 g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated);
16201 #else //MULTIPLE_HEAPS
16202 soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated);
16203 #endif //MULTIPLE_HEAPS
16206 if (allocation_no_gc_loh != 0)
16208 current_no_gc_region_info.loh_allocation_size = static_cast<size_t>(allocation_no_gc_loh);
16209 size_per_heap = current_no_gc_region_info.loh_allocation_size;
16210 #ifdef MULTIPLE_HEAPS
16211 size_per_heap /= n_heaps;
16212 for (int i = 0; i < n_heaps; i++)
16213 g_heaps[i]->loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
16214 #else //MULTIPLE_HEAPS
16215 loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
16216 #endif //MULTIPLE_HEAPS
16220 if (status != start_no_gc_success)
16221 restore_data_for_no_gc();
16225 void gc_heap::handle_failure_for_no_gc()
16227 gc_heap::restore_data_for_no_gc();
16228 // sets current_no_gc_region_info.started to FALSE here.
16229 memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
16232 start_no_gc_region_status gc_heap::get_start_no_gc_region_status()
16234 return current_no_gc_region_info.start_status;
16237 void gc_heap::record_gcs_during_no_gc()
16239 if (current_no_gc_region_info.started)
16241 current_no_gc_region_info.num_gcs++;
16242 if (is_induced (settings.reason))
16243 current_no_gc_region_info.num_gcs_induced++;
16247 BOOL gc_heap::find_loh_free_for_no_gc()
16249 allocator* loh_allocator = generation_allocator (generation_of (max_generation + 1));
16250 size_t sz_list = loh_allocator->first_bucket_size();
16251 size_t size = loh_allocation_no_gc;
16252 for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
16254 if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
16256 uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
16259 size_t free_list_size = unused_array_size(free_list);
16261 if (free_list_size > loh_allocation_no_gc)
16263 dprintf (3, ("free item %Ix(%Id) for no gc", (size_t)free_list, free_list_size));
16267 free_list = free_list_slot (free_list);
16270 sz_list = sz_list * 2;
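// The allocator's buckets double in size starting from first_bucket_size, so
// the loop starts at the first bucket that could fit the request (or the last
// bucket) and walks its free list for an item strictly larger than the no-GC
// LOH budget.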
16276 BOOL gc_heap::find_loh_space_for_no_gc()
16278 saved_loh_segment_no_gc = 0;
16280 if (find_loh_free_for_no_gc())
16283 heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16287 size_t remaining = heap_segment_reserved (seg) - heap_segment_allocated (seg);
16288 if (remaining >= loh_allocation_no_gc)
16290 saved_loh_segment_no_gc = seg;
16293 seg = heap_segment_next (seg);
16296 if (!saved_loh_segment_no_gc && current_no_gc_region_info.minimal_gc_p)
16298 // If no full GC is allowed, we try to get a new seg right away.
16299 saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc)
16300 #ifdef MULTIPLE_HEAPS
16302 #endif //MULTIPLE_HEAPS
16306 return (saved_loh_segment_no_gc != 0);
16309 BOOL gc_heap::loh_allocated_for_no_gc()
16311 if (!saved_loh_segment_no_gc)
16314 heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16317 if (seg == saved_loh_segment_no_gc)
16321 seg = heap_segment_next (seg);
16327 BOOL gc_heap::commit_loh_for_no_gc (heap_segment* seg)
16329 uint8_t* end_committed = heap_segment_allocated (seg) + loh_allocation_no_gc;
16330 assert (end_committed <= heap_segment_reserved (seg));
16331 return (grow_heap_segment (seg, end_committed));
16334 void gc_heap::thread_no_gc_loh_segments()
16336 #ifdef MULTIPLE_HEAPS
16337 for (int i = 0; i < n_heaps; i++)
16339 gc_heap* hp = g_heaps[i];
16340 if (hp->loh_allocated_for_no_gc())
16342 hp->thread_loh_segment (hp->saved_loh_segment_no_gc);
16343 hp->saved_loh_segment_no_gc = 0;
16346 #else //MULTIPLE_HEAPS
16347 if (loh_allocated_for_no_gc())
16349 thread_loh_segment (saved_loh_segment_no_gc);
16350 saved_loh_segment_no_gc = 0;
16352 #endif //MULTIPLE_HEAPS
16355 void gc_heap::set_loh_allocations_for_no_gc()
16357 if (current_no_gc_region_info.loh_allocation_size != 0)
16359 dynamic_data* dd = dynamic_data_of (max_generation + 1);
16360 dd_new_allocation (dd) = loh_allocation_no_gc;
16361 dd_gc_new_allocation (dd) = dd_new_allocation (dd);
16365 void gc_heap::set_soh_allocations_for_no_gc()
16367 if (current_no_gc_region_info.soh_allocation_size != 0)
16369 dynamic_data* dd = dynamic_data_of (0);
16370 dd_new_allocation (dd) = soh_allocation_no_gc;
16371 dd_gc_new_allocation (dd) = dd_new_allocation (dd);
16372 #ifdef MULTIPLE_HEAPS
16373 alloc_context_count = 0;
16374 #endif //MULTIPLE_HEAPS
16378 void gc_heap::set_allocations_for_no_gc()
16380 #ifdef MULTIPLE_HEAPS
16381 for (int i = 0; i < n_heaps; i++)
16383 gc_heap* hp = g_heaps[i];
16384 hp->set_loh_allocations_for_no_gc();
16385 hp->set_soh_allocations_for_no_gc();
16387 #else //MULTIPLE_HEAPS
16388 set_loh_allocations_for_no_gc();
16389 set_soh_allocations_for_no_gc();
16390 #endif //MULTIPLE_HEAPS
16393 BOOL gc_heap::should_proceed_for_no_gc()
16395 BOOL gc_requested = FALSE;
16396 BOOL loh_full_gc_requested = FALSE;
16397 BOOL soh_full_gc_requested = FALSE;
16398 BOOL no_gc_requested = FALSE;
16399 BOOL get_new_loh_segments = FALSE;
16401 if (current_no_gc_region_info.soh_allocation_size)
16403 #ifdef MULTIPLE_HEAPS
16404 for (int i = 0; i < n_heaps; i++)
16406 gc_heap* hp = g_heaps[i];
16407 if ((size_t)(heap_segment_reserved (hp->ephemeral_heap_segment) - hp->alloc_allocated) < hp->soh_allocation_no_gc)
16409 gc_requested = TRUE;
16413 #else //MULTIPLE_HEAPS
16414 if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated) < soh_allocation_no_gc)
16415 gc_requested = TRUE;
16416 #endif //MULTIPLE_HEAPS
16420 #ifdef MULTIPLE_HEAPS
16421 for (int i = 0; i < n_heaps; i++)
16423 gc_heap* hp = g_heaps[i];
16424 if (!(hp->grow_heap_segment (hp->ephemeral_heap_segment, (hp->alloc_allocated + hp->soh_allocation_no_gc))))
16426 soh_full_gc_requested = TRUE;
16430 #else //MULTIPLE_HEAPS
16431 if (!grow_heap_segment (ephemeral_heap_segment, (alloc_allocated + soh_allocation_no_gc)))
16432 soh_full_gc_requested = TRUE;
16433 #endif //MULTIPLE_HEAPS
16437 if (!current_no_gc_region_info.minimal_gc_p && gc_requested)
16439 soh_full_gc_requested = TRUE;
16442 no_gc_requested = !(soh_full_gc_requested || gc_requested);
16444 if (soh_full_gc_requested && current_no_gc_region_info.minimal_gc_p)
16446 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16450 if (!soh_full_gc_requested && current_no_gc_region_info.loh_allocation_size)
16452 // Check to see if we have enough reserved space.
16453 #ifdef MULTIPLE_HEAPS
16454 for (int i = 0; i < n_heaps; i++)
16456 gc_heap* hp = g_heaps[i];
16457 if (!hp->find_loh_space_for_no_gc())
16459 loh_full_gc_requested = TRUE;
16463 #else //MULTIPLE_HEAPS
16464 if (!find_loh_space_for_no_gc())
16465 loh_full_gc_requested = TRUE;
16466 #endif //MULTIPLE_HEAPS
16468 // Check to see if we have committed space.
16469 if (!loh_full_gc_requested)
16471 #ifdef MULTIPLE_HEAPS
16472 for (int i = 0; i < n_heaps; i++)
16474 gc_heap* hp = g_heaps[i];
16475 if (hp->saved_loh_segment_no_gc &&!hp->commit_loh_for_no_gc (hp->saved_loh_segment_no_gc))
16477 loh_full_gc_requested = TRUE;
16481 #else //MULTIPLE_HEAPS
16482 if (saved_loh_segment_no_gc && !commit_loh_for_no_gc (saved_loh_segment_no_gc))
16483 loh_full_gc_requested = TRUE;
16484 #endif //MULTIPLE_HEAPS
16488 if (loh_full_gc_requested || soh_full_gc_requested)
16490 if (current_no_gc_region_info.minimal_gc_p)
16491 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16494 no_gc_requested = !(loh_full_gc_requested || soh_full_gc_requested || gc_requested);
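// If no GC of any kind is required, the budgets can be set immediately and the
// region is marked as started right away; otherwise a GC runs first and
// allocate_for_no_gc_after_gc finishes setting up the region.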
16496 if (current_no_gc_region_info.start_status == start_no_gc_success)
16498 if (no_gc_requested)
16499 set_allocations_for_no_gc();
16504 if ((current_no_gc_region_info.start_status == start_no_gc_success) && !no_gc_requested)
16508 // We are done with starting the no_gc_region.
16509 current_no_gc_region_info.started = TRUE;
16514 end_no_gc_region_status gc_heap::end_no_gc_region()
16516 dprintf (1, ("end no gc called"));
16518 end_no_gc_region_status status = end_no_gc_success;
16520 if (!(current_no_gc_region_info.started))
16521 status = end_no_gc_not_in_progress;
16522 if (current_no_gc_region_info.num_gcs_induced)
16523 status = end_no_gc_induced;
16524 else if (current_no_gc_region_info.num_gcs)
16525 status = end_no_gc_alloc_exceeded;
16527 if (settings.pause_mode == pause_no_gc)
16528 restore_data_for_no_gc();
16530 // sets current_no_gc_region_info.started to FALSE here.
16531 memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
16537 void gc_heap::update_collection_counts ()
16539 dynamic_data* dd0 = dynamic_data_of (0);
16540 dd_gc_clock (dd0) += 1;
16542 size_t now = GetHighPrecisionTimeStamp();
16544 for (int i = 0; i <= settings.condemned_generation;i++)
16546 dynamic_data* dd = dynamic_data_of (i);
16547 dd_collection_count (dd)++;
16548 //this is needed by the linear allocation model
16549 if (i == max_generation)
16550 dd_collection_count (dynamic_data_of (max_generation+1))++;
16551 dd_gc_clock (dd) = dd_gc_clock (dd0);
16552 dd_time_clock (dd) = now;
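// dd_gc_clock effectively counts gen0 GCs (every condemned generation is
// stamped with gen0's clock) while dd_time_clock records a timestamp; both
// feed the time-based tuning in generation_to_condemn.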
16556 BOOL gc_heap::expand_soh_with_minimal_gc()
16558 if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) >= soh_allocation_no_gc)
16561 heap_segment* new_seg = soh_get_segment_to_expand();
16564 if (g_gc_card_table != card_table)
16565 copy_brick_card_table();
16567 settings.promotion = TRUE;
16568 settings.demotion = FALSE;
16569 ephemeral_promotion = TRUE;
16570 int condemned_gen_number = max_generation - 1;
16572 generation* gen = 0;
16573 int align_const = get_alignment_constant (TRUE);
16575 for (int i = 0; i <= condemned_gen_number; i++)
16577 gen = generation_of (i);
16578 saved_ephemeral_plan_start[i] = generation_allocation_start (gen);
16579 saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const);
16582 // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2
16583 // and need to make sure that there are no left over bricks from the previous GCs for the space
16584 // we just used for gen0 allocation. We will need to go through the bricks for these objects for
16585 // ephemeral GCs later.
16586 for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
16587 b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment)));
16593 size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) -
16594 generation_allocation_start (generation_of (max_generation - 1)));
16595 heap_segment_next (ephemeral_heap_segment) = new_seg;
16596 ephemeral_heap_segment = new_seg;
16597 uint8_t* start = heap_segment_mem (ephemeral_heap_segment);
16599 for (int i = condemned_gen_number; i >= 0; i--)
16601 gen = generation_of (i);
16602 size_t gen_start_size = Align (min_obj_size);
16603 make_generation (generation_table[i], ephemeral_heap_segment, start, 0);
16604 generation_plan_allocation_start (gen) = start;
16605 generation_plan_allocation_start_size (gen) = gen_start_size;
16606 start += gen_start_size;
16608 heap_segment_used (ephemeral_heap_segment) = start - plug_skew;
16609 heap_segment_plan_allocated (ephemeral_heap_segment) = start;
16611 fix_generation_bounds (condemned_gen_number, generation_of (0));
16613 dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size;
16614 dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation));
16616 adjust_ephemeral_limits();
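// Net effect of a minimal GC: everything on the old ephemeral segment is
// promoted to max_generation by planting fresh generation starts at the base
// of the new segment, and the ephemeral space consumed this way is charged
// against gen2's allocation budget.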
16623 // Only to be done on the thread that calls restart in a join for server GC;
16624 // also resets the per-heap OOM status.
16625 void gc_heap::check_and_set_no_gc_oom()
16627 #ifdef MULTIPLE_HEAPS
16628 for (int i = 0; i < n_heaps; i++)
16630 gc_heap* hp = g_heaps[i];
16631 if (hp->no_gc_oom_p)
16633 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16634 hp->no_gc_oom_p = false;
16640 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16641 no_gc_oom_p = false;
16643 #endif //MULTIPLE_HEAPS
16646 void gc_heap::allocate_for_no_gc_after_gc()
16648 if (current_no_gc_region_info.minimal_gc_p)
16649 repair_allocation_contexts (TRUE);
16651 no_gc_oom_p = false;
16653 if (current_no_gc_region_info.start_status != start_no_gc_no_memory)
16655 if (current_no_gc_region_info.soh_allocation_size != 0)
16657 if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) ||
16658 (!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc))))
16660 no_gc_oom_p = true;
16663 #ifdef MULTIPLE_HEAPS
16664 gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
16665 if (gc_t_join.joined())
16667 #endif //MULTIPLE_HEAPS
16669 check_and_set_no_gc_oom();
16671 #ifdef MULTIPLE_HEAPS
16672 gc_t_join.restart();
16674 #endif //MULTIPLE_HEAPS
16677 if ((current_no_gc_region_info.start_status == start_no_gc_success) &&
16678 !(current_no_gc_region_info.minimal_gc_p) &&
16679 (current_no_gc_region_info.loh_allocation_size != 0))
16681 gc_policy = policy_compact;
16682 saved_loh_segment_no_gc = 0;
16684 if (!find_loh_free_for_no_gc())
16686 heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
16687 BOOL found_seg_p = FALSE;
16690 if ((size_t)(heap_segment_reserved (seg) - heap_segment_allocated (seg)) >= loh_allocation_no_gc)
16692 found_seg_p = TRUE;
16693 if (!commit_loh_for_no_gc (seg))
16695 no_gc_oom_p = true;
16699 seg = heap_segment_next (seg);
16703 gc_policy = policy_expand;
16706 #ifdef MULTIPLE_HEAPS
16707 gc_t_join.join(this, gc_join_expand_loh_no_gc);
16708 if (gc_t_join.joined())
16710 check_and_set_no_gc_oom();
16712 if (current_no_gc_region_info.start_status == start_no_gc_success)
16714 for (int i = 0; i < n_heaps; i++)
16716 gc_heap* hp = g_heaps[i];
16717 if (hp->gc_policy == policy_expand)
16719 hp->saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc), hp);
16720 if (!(hp->saved_loh_segment_no_gc))
16722 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16729 gc_t_join.restart();
16731 #else //MULTIPLE_HEAPS
16732 check_and_set_no_gc_oom();
16734 if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand))
16736 saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc));
16737 if (!saved_loh_segment_no_gc)
16738 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16740 #endif //MULTIPLE_HEAPS
16742 if ((current_no_gc_region_info.start_status == start_no_gc_success) && saved_loh_segment_no_gc)
16744 if (!commit_loh_for_no_gc (saved_loh_segment_no_gc))
16746 no_gc_oom_p = true;
16752 #ifdef MULTIPLE_HEAPS
16753 gc_t_join.join(this, gc_join_final_no_gc);
16754 if (gc_t_join.joined())
16756 #endif //MULTIPLE_HEAPS
16758 check_and_set_no_gc_oom();
16760 if (current_no_gc_region_info.start_status == start_no_gc_success)
16762 set_allocations_for_no_gc();
16763 current_no_gc_region_info.started = TRUE;
16766 #ifdef MULTIPLE_HEAPS
16767 gc_t_join.restart();
16769 #endif //MULTIPLE_HEAPS
16772 void gc_heap::init_records()
16774 // An option is to move this to after we figure out which gen to condemn, so we don't
16775 // need to clear some generations' data because we know they don't change, but that also means
16776 // we can't simply call memset here.
16777 memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap));
16778 gc_data_per_heap.heap_index = heap_number;
16779 if (heap_number == 0)
16780 memset (&gc_data_global, 0, sizeof (gc_data_global));
16782 #ifdef GC_CONFIG_DRIVEN
16783 memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc));
16784 #endif //GC_CONFIG_DRIVEN
16785 memset (&fgm_result, 0, sizeof (fgm_result));
16787 for (int i = 0; i <= (max_generation + 1); i++)
16789 gc_data_per_heap.gen_data[i].size_before = generation_size (i);
16790 generation* gen = generation_of (i);
16791 gc_data_per_heap.gen_data[i].free_list_space_before = generation_free_list_space (gen);
16792 gc_data_per_heap.gen_data[i].free_obj_space_before = generation_free_obj_space (gen);
16795 sufficient_gen0_space_p = FALSE;
16797 #ifdef MULTIPLE_HEAPS
16798 gen0_allocated_after_gc_p = false;
16799 #endif //MULTIPLE_HEAPS
16801 #if defined (_DEBUG) && defined (VERIFY_HEAP)
16802 verify_pinned_queue_p = FALSE;
16803 #endif // _DEBUG && VERIFY_HEAP
16806 void gc_heap::pm_full_gc_init_or_clear()
16808 // This means the next GC will be a full blocking GC and we need to init.
16809 if (settings.condemned_generation == (max_generation - 1))
16811 if (pm_trigger_full_gc)
16813 #ifdef MULTIPLE_HEAPS
16815 #endif //MULTIPLE_HEAPS
16816 dprintf (GTC_LOG, ("init for PM triggered full GC"));
16817 uint32_t saved_entry_memory_load = settings.entry_memory_load;
16818 settings.init_mechanisms();
16819 settings.reason = reason_pm_full_gc;
16820 settings.condemned_generation = max_generation;
16821 settings.entry_memory_load = saved_entry_memory_load;
16822 // Can't assert this since we only check at the end of gen2 GCs;
16823 // during gen1 the memory load could have already dropped.
16824 // Although arguably we should just turn off PM then...
16825 //assert (settings.entry_memory_load >= high_memory_load_th);
16826 assert (settings.entry_memory_load > 0);
16827 settings.gc_index += 1;
16831 // This means we are in the process of a full blocking GC triggered by
16833 else if (settings.reason == reason_pm_full_gc)
16835 assert (settings.condemned_generation == max_generation);
16836 assert (pm_trigger_full_gc);
16837 pm_trigger_full_gc = false;
16839 dprintf (GTC_LOG, ("PM triggered full GC done"));
16843 void gc_heap::garbage_collect_pm_full_gc()
16845 assert (settings.condemned_generation == max_generation);
16846 assert (settings.reason == reason_pm_full_gc);
16847 assert (!settings.concurrent);
16851 void gc_heap::garbage_collect (int n)
16853 //reset the number of alloc contexts
16854 alloc_contexts_used = 0;
16856 fix_allocation_contexts (TRUE);
16857 #ifdef MULTIPLE_HEAPS
16859 gc_t_join.start_ts(this);
16860 #endif //JOIN_STATS
16861 clear_gen0_bricks();
16862 #endif //MULTIPLE_HEAPS
16864 if ((settings.pause_mode == pause_no_gc) && current_no_gc_region_info.minimal_gc_p)
16866 #ifdef MULTIPLE_HEAPS
16867 gc_t_join.join(this, gc_join_minimal_gc);
16868 if (gc_t_join.joined())
16870 #endif //MULTIPLE_HEAPS
16872 #ifdef MULTIPLE_HEAPS
16873 // this is serialized because we need to get a segment
16874 for (int i = 0; i < n_heaps; i++)
16876 if (!(g_heaps[i]->expand_soh_with_minimal_gc()))
16877 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16880 if (!expand_soh_with_minimal_gc())
16881 current_no_gc_region_info.start_status = start_no_gc_no_memory;
16882 #endif //MULTIPLE_HEAPS
16884 update_collection_counts_for_no_gc();
16886 #ifdef MULTIPLE_HEAPS
16887 gc_t_join.restart();
16889 #endif //MULTIPLE_HEAPS
16896 settings.reason = gc_trigger_reason;
16897 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
16898 num_pinned_objects = 0;
16899 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
16902 if (settings.reason == reason_gcstress)
16904 settings.reason = reason_induced;
16905 settings.stress_induced = TRUE;
16907 #endif // STRESS_HEAP
16909 #ifdef MULTIPLE_HEAPS
16910 //align all heaps on the max generation to condemn
16911 dprintf (3, ("Joining for max generation to condemn"));
16912 condemned_generation_num = generation_to_condemn (n,
16913 &blocking_collection,
16914 &elevation_requested,
16916 gc_t_join.join(this, gc_join_generation_determined);
16917 if (gc_t_join.joined())
16918 #endif //MULTIPLE_HEAPS
16920 #if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
16921 //delete old slots from the segment table
16922 seg_table->delete_old_slots();
16923 #endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE
16925 #ifdef MULTIPLE_HEAPS
16926 for (int i = 0; i < n_heaps; i++)
16928 gc_heap* hp = g_heaps[i];
16929 // check for card table growth
16930 if (g_gc_card_table != hp->card_table)
16931 hp->copy_brick_card_table();
16933 hp->rearrange_large_heap_segments();
16934 #ifdef BACKGROUND_GC
16935 hp->background_delay_delete_loh_segments();
16936 if (!recursive_gc_sync::background_running_p())
16937 hp->rearrange_small_heap_segments();
16938 #endif //BACKGROUND_GC
16940 #else //MULTIPLE_HEAPS
16941 if (g_gc_card_table != card_table)
16942 copy_brick_card_table();
16944 rearrange_large_heap_segments();
16945 #ifdef BACKGROUND_GC
16946 background_delay_delete_loh_segments();
16947 if (!recursive_gc_sync::background_running_p())
16948 rearrange_small_heap_segments();
16949 #endif //BACKGROUND_GC
16950 #endif //MULTIPLE_HEAPS
16952 BOOL should_evaluate_elevation = TRUE;
16953 BOOL should_do_blocking_collection = FALSE;
16955 #ifdef MULTIPLE_HEAPS
16956 int gen_max = condemned_generation_num;
16957 for (int i = 0; i < n_heaps; i++)
16959 if (gen_max < g_heaps[i]->condemned_generation_num)
16960 gen_max = g_heaps[i]->condemned_generation_num;
16961 if (should_evaluate_elevation && !(g_heaps[i]->elevation_requested))
16962 should_evaluate_elevation = FALSE;
16963 if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
16964 should_do_blocking_collection = TRUE;
16967 settings.condemned_generation = gen_max;
16968 #else //MULTIPLE_HEAPS
16969 settings.condemned_generation = generation_to_condemn (n,
16970 &blocking_collection,
16971 &elevation_requested,
16973 should_evaluate_elevation = elevation_requested;
16974 should_do_blocking_collection = blocking_collection;
16975 #endif //MULTIPLE_HEAPS
16977 settings.condemned_generation = joined_generation_to_condemn (
16978 should_evaluate_elevation,
16980 settings.condemned_generation,
16981 &should_do_blocking_collection
16985 STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
16986 "condemned generation num: %d\n", settings.condemned_generation);
16988 record_gcs_during_no_gc();
16990 if (settings.condemned_generation > 1)
16991 settings.promotion = TRUE;
16993 #ifdef HEAP_ANALYZE
16994 // At this point we've decided what generation is condemned
16995 // See if we've been requested to analyze survivors after the mark phase
16996 if (GCToEEInterface::AnalyzeSurvivorsRequested(settings.condemned_generation))
16998 heap_analyze_enabled = TRUE;
17000 #endif // HEAP_ANALYZE
17002 GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);
17004 #ifdef BACKGROUND_GC
17005 if ((settings.condemned_generation == max_generation) &&
17006 (recursive_gc_sync::background_running_p()))
17008 //TODO BACKGROUND_GC If we just wait for the end of gc, it won't work
17009 // because we have to collect 0 and 1 properly
17010 // in particular, the allocation contexts are gone.
17011 // For now, it is simpler to collect max_generation-1
17012 settings.condemned_generation = max_generation - 1;
17013 dprintf (GTC_LOG, ("bgc - 1 instead of 2"));
17016 if ((settings.condemned_generation == max_generation) &&
17017 (should_do_blocking_collection == FALSE) &&
17018 gc_can_use_concurrent &&
17019 !temp_disable_concurrent_p &&
17020 ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)))
17022 keep_bgc_threads_p = TRUE;
17023 c_write (settings.concurrent, TRUE);
17025 #endif //BACKGROUND_GC
17027 settings.gc_index = (uint32_t)dd_collection_count (dynamic_data_of (0)) + 1;
17029 // Call the EE for start of GC work
17030 // just one thread for MP GC
17031 GCToEEInterface::GcStartWork (settings.condemned_generation,
17034 // TODO: we could fire an ETW event to say this GC is a concurrent GC, but later on, due to not being able to
17035 // create threads or whatever, it could become a non-concurrent GC. Maybe for concurrent GC we should fire
17036 // it in do_background_gc and if it failed to be a CGC we fire it in gc1... in other words, this should be
17040 #ifdef MULTIPLE_HEAPS
17041 gc_start_event.Reset();
17042 //start all threads on the roots.
17043 dprintf(3, ("Starting all gc threads for gc"));
17044 gc_t_join.restart();
17045 #endif //MULTIPLE_HEAPS
17048 descr_generations (TRUE);
17051 if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
17052 !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY))
17054 verify_heap (TRUE);
17056 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)
17057 checkGCWriteBarrier();
17059 #endif // VERIFY_HEAP
17061 #ifdef BACKGROUND_GC
17062 if (settings.concurrent)
17064 // We need to save the settings because we'll need to restore it after each FGC.
17065 assert (settings.condemned_generation == max_generation);
17066 settings.compaction = FALSE;
17067 saved_bgc_settings = settings;
17069 #ifdef MULTIPLE_HEAPS
17070 if (heap_number == 0)
17072 for (int i = 0; i < n_heaps; i++)
17074 prepare_bgc_thread (g_heaps[i]);
17076 dprintf (2, ("setting bgc_threads_sync_event"));
17077 bgc_threads_sync_event.Set();
17081 bgc_threads_sync_event.Wait(INFINITE, FALSE);
17082 dprintf (2, ("bgc_threads_sync_event is signalled"));
17085 prepare_bgc_thread(0);
17086 #endif //MULTIPLE_HEAPS
17088 #ifdef MULTIPLE_HEAPS
17089 gc_t_join.join(this, gc_join_start_bgc);
17090 if (gc_t_join.joined())
17091 #endif //MULTIPLE_HEAPS
17093 do_concurrent_p = TRUE;
17094 do_ephemeral_gc_p = FALSE;
17095 #ifdef MULTIPLE_HEAPS
17096 dprintf(2, ("Joined to perform a background GC"));
17098 for (int i = 0; i < n_heaps; i++)
17100 gc_heap* hp = g_heaps[i];
17101 if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init (hp->mark_array))
17103 do_concurrent_p = FALSE;
17108 hp->background_saved_lowest_address = hp->lowest_address;
17109 hp->background_saved_highest_address = hp->highest_address;
17113 do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init (mark_array));
17114 if (do_concurrent_p)
17116 background_saved_lowest_address = lowest_address;
17117 background_saved_highest_address = highest_address;
17119 #endif //MULTIPLE_HEAPS
17121 if (do_concurrent_p)
17123 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
17124 SoftwareWriteWatch::EnableForGCHeap();
17125 #endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
17127 #ifdef MULTIPLE_HEAPS
17128 for (int i = 0; i < n_heaps; i++)
17129 g_heaps[i]->current_bgc_state = bgc_initialized;
17131 current_bgc_state = bgc_initialized;
17132 #endif //MULTIPLE_HEAPS
17134 int gen = check_for_ephemeral_alloc();
17135 // always do a gen1 GC before we start BGC.
17136 // This is temporary, for testing purposes.
17137 //int gen = max_generation - 1;
17138 dont_restart_ee_p = TRUE;
17141 // If we decide not to do a GC before the BGC, we need to
17142 // restore the gen0 alloc context.
17143 #ifdef MULTIPLE_HEAPS
17144 for (int i = 0; i < n_heaps; i++)
17146 generation_allocation_pointer (g_heaps[i]->generation_of (0)) = 0;
17147 generation_allocation_limit (g_heaps[i]->generation_of (0)) = 0;
17150 generation_allocation_pointer (youngest_generation) = 0;
17151 generation_allocation_limit (youngest_generation) = 0;
17152 #endif //MULTIPLE_HEAPS
17156 do_ephemeral_gc_p = TRUE;
17158 settings.init_mechanisms();
17159 settings.condemned_generation = gen;
17160 settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2;
17163 // TODO BACKGROUND_GC need to add the profiling stuff here.
17164 dprintf (GTC_LOG, ("doing gen%d before doing a bgc", gen));
17167 //clear the cards so they don't bleed into gen 1 during the collection
17168 // shouldn't this always be done at the beginning of any GC?
17169 //clear_card_for_addresses (
17170 // generation_allocation_start (generation_of (0)),
17171 // heap_segment_allocated (ephemeral_heap_segment));
17173 if (!do_ephemeral_gc_p)
17175 do_background_gc();
17180 settings.compaction = TRUE;
17181 c_write (settings.concurrent, FALSE);
17184 #ifdef MULTIPLE_HEAPS
17185 gc_t_join.restart();
17186 #endif //MULTIPLE_HEAPS
17189 if (do_concurrent_p)
17191 // At this point we are sure we'll be starting a BGC, so save its per-heap data here.
17192 // Global data is only calculated at the end of the GC, so we don't need to worry about
17193 // FGCs overwriting it.
17194 memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap));
17195 memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));
17197 if (do_ephemeral_gc_p)
17199 dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation));
17201 gen_to_condemn_reasons.init();
17202 gen_to_condemn_reasons.set_condition (gen_before_bgc);
17203 gc_data_per_heap.gen_to_condemn_reasons.init (&gen_to_condemn_reasons);
17205 #ifdef MULTIPLE_HEAPS
17206 gc_t_join.join(this, gc_join_bgc_after_ephemeral);
17207 if (gc_t_join.joined())
17208 #endif //MULTIPLE_HEAPS
17210 #ifdef MULTIPLE_HEAPS
17212 #endif //MULTIPLE_HEAPS
17213 settings = saved_bgc_settings;
17214 assert (settings.concurrent);
17216 do_background_gc();
17218 #ifdef MULTIPLE_HEAPS
17219 gc_t_join.restart();
17220 #endif //MULTIPLE_HEAPS
17226 dprintf (2, ("couldn't create BGC threads, reverting to doing a blocking GC"));
17231 #endif //BACKGROUND_GC
17235 #ifndef MULTIPLE_HEAPS
17236 allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
17237 allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
17238 fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
17239 #endif //MULTIPLE_HEAPS
17242 if (settings.pause_mode == pause_no_gc)
17243 allocate_for_no_gc_after_gc();
17247 #define mark_stack_empty_p() (mark_stack_base == mark_stack_tos)
17250 size_t& gc_heap::promoted_bytes(int thread)
17252 #ifdef MULTIPLE_HEAPS
17253 return g_promoted [thread*16];
17254 #else //MULTIPLE_HEAPS
17255 UNREFERENCED_PARAMETER(thread);
17257 #endif //MULTIPLE_HEAPS
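// The thread*16 stride above is a false-sharing guard: each heap's promoted-bytes
// counter lands on its own cache line, so parallel mark threads don't invalidate each
// other's lines while bumping adjacent counters. A standalone sketch of the same idea
// (heap count and stride here are illustrative):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstddef>

const int toy_max_heaps = 64;

// With 8-byte size_t, a 16-element stride puts consecutive heaps 128 bytes apart,
// comfortably past a typical 64-byte cache line.
static size_t toy_promoted[toy_max_heaps * 16];

static size_t& toy_promoted_bytes (int heap_number)
{
    return toy_promoted[heap_number * 16];
}
#endif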
17260 #ifdef INTERIOR_POINTERS
17261 heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
17263 #ifdef SEG_MAPPING_TABLE
17264 heap_segment* seg = seg_mapping_table_segment_of (interior);
17267 if (small_segment_only_p && heap_segment_loh_p (seg))
17271 #else //SEG_MAPPING_TABLE
17272 #ifdef MULTIPLE_HEAPS
17273 for (int i = 0; i < gc_heap::n_heaps; i++)
17275 gc_heap* h = gc_heap::g_heaps [i];
17276 hs = h->find_segment_per_heap (o, small_segment_only_p);
17284 gc_heap* h = pGenGCHeap;
17285 hs = h->find_segment_per_heap (o, small_segment_only_p);
17287 #endif //MULTIPLE_HEAPS
17288 #endif //SEG_MAPPING_TABLE
17291 heap_segment* gc_heap::find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p)
17293 #ifdef SEG_MAPPING_TABLE
17294 return find_segment (interior, small_segment_only_p);
17295 #else //SEG_MAPPING_TABLE
17296 if (in_range_for_segment (interior, ephemeral_heap_segment))
17298 return ephemeral_heap_segment;
17302 heap_segment* found_seg = 0;
17305 heap_segment* seg = generation_start_segment (generation_of (max_generation));
17308 if (in_range_for_segment (interior, seg))
17311 goto end_find_segment;
17314 } while ((seg = heap_segment_next (seg)) != 0);
17316 if (!small_segment_only_p)
17318 #ifdef BACKGROUND_GC
17320 ptrdiff_t delta = 0;
17321 heap_segment* seg = segment_of (interior, delta);
17322 if (seg && in_range_for_segment (interior, seg))
17326 goto end_find_segment;
17328 #else //BACKGROUND_GC
17329 heap_segment* seg = generation_start_segment (generation_of (max_generation+1));
17332 if (in_range_for_segment(interior, seg))
17335 goto end_find_segment;
17338 } while ((seg = heap_segment_next (seg)) != 0);
17339 #endif //BACKGROUND_GC
17345 #endif //SEG_MAPPING_TABLE
17347 #endif //INTERIOR_POINTERS
17349 #if !defined(_DEBUG) && !defined(__GNUC__)
17350 inline // This causes link errors if global optimization is off
17351 #endif //!_DEBUG && !__GNUC__
17352 gc_heap* gc_heap::heap_of (uint8_t* o)
17354 #ifdef MULTIPLE_HEAPS
17356 return g_heaps [0];
17357 #ifdef SEG_MAPPING_TABLE
17358 gc_heap* hp = seg_mapping_table_heap_of (o);
17359 return (hp ? hp : g_heaps[0]);
17360 #else //SEG_MAPPING_TABLE
17361 ptrdiff_t delta = 0;
17362 heap_segment* seg = segment_of (o, delta);
17363 return (seg ? heap_segment_heap (seg) : g_heaps [0]);
17364 #endif //SEG_MAPPING_TABLE
17365 #else //MULTIPLE_HEAPS
17366 UNREFERENCED_PARAMETER(o);
17368 #endif //MULTIPLE_HEAPS
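// When SEG_MAPPING_TABLE is available, heap_of above is a table lookup: the mapping
// table is indexed by the high bits of the address and records which segment/heap owns
// that chunk of the reserved range. A toy direct-mapped sketch of that shape (chunk
// size, table length, and toy_heap are illustrative, not the real table layout):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstdint>
#include <cstddef>

struct toy_heap;

const size_t toy_chunk_bits = 22;   // 4MB chunks
const size_t toy_table_len  = 1024;
static toy_heap* toy_heap_table[toy_table_len];

static toy_heap* toy_heap_of (uint8_t* o, toy_heap* default_heap)
{
    size_t index = ((size_t)o >> toy_chunk_bits) % toy_table_len;
    toy_heap* hp = toy_heap_table[index];
    return (hp ? hp : default_heap); // mirrors the (hp ? hp : g_heaps[0]) fallback above
}
#endif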
17372 gc_heap* gc_heap::heap_of_gc (uint8_t* o)
17374 #ifdef MULTIPLE_HEAPS
17376 return g_heaps [0];
17377 #ifdef SEG_MAPPING_TABLE
17378 gc_heap* hp = seg_mapping_table_heap_of_gc (o);
17379 return (hp ? hp : g_heaps[0]);
17380 #else //SEG_MAPPING_TABLE
17381 ptrdiff_t delta = 0;
17382 heap_segment* seg = segment_of (o, delta);
17383 return (seg ? heap_segment_heap (seg) : g_heaps [0]);
17384 #endif //SEG_MAPPING_TABLE
17385 #else //MULTIPLE_HEAPS
17386 UNREFERENCED_PARAMETER(o);
17388 #endif //MULTIPLE_HEAPS
17391 #ifdef INTERIOR_POINTERS
17392 // will find all heap objects (large and small)
17393 uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
17395 if (!gen0_bricks_cleared)
17397 #ifdef MULTIPLE_HEAPS
17398 assert (!"Should have already been done in server GC");
17399 #endif //MULTIPLE_HEAPS
17400 gen0_bricks_cleared = TRUE;
17401 //initialize brick table for gen 0
17402 for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
17403 b < brick_of (align_on_brick
17404 (heap_segment_allocated (ephemeral_heap_segment)));
17410 #ifdef FFIND_OBJECT
17411 //indicate that in the future this needs to be done during allocation
17412 #ifdef MULTIPLE_HEAPS
17413 gen0_must_clear_bricks = FFIND_DECAY*gc_heap::n_heaps;
17415 gen0_must_clear_bricks = FFIND_DECAY;
17416 #endif //MULTIPLE_HEAPS
17417 #endif //FFIND_OBJECT
17419 int brick_entry = get_brick_entry(brick_of (interior));
17420 if (brick_entry == 0)
17422 // this is a pointer to a large object
17423 heap_segment* seg = find_segment_per_heap (interior, FALSE);
17425 #ifdef FEATURE_CONSERVATIVE_GC
17426 && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
17430 // If interior falls within the first free object at the beginning of a generation,
17431 // we don't have a brick entry for it, and we may incorrectly treat it as being on the large object heap.
17432 int align_const = get_alignment_constant (heap_segment_read_only_p (seg)
17433 #ifdef FEATURE_CONSERVATIVE_GC
17434 || (GCConfig::GetConservativeGC() && !heap_segment_loh_p (seg))
17437 //int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
17438 assert (interior < heap_segment_allocated (seg));
17440 uint8_t* o = heap_segment_mem (seg);
17441 while (o < heap_segment_allocated (seg))
17443 uint8_t* next_o = o + Align (size (o), align_const);
17444 assert (next_o > o);
17445 if ((o <= interior) && (interior < next_o))
17456 else if (interior >= low)
17458 heap_segment* seg = find_segment_per_heap (interior, TRUE);
17461 #ifdef FEATURE_CONSERVATIVE_GC
17462 if (interior >= heap_segment_allocated (seg))
17465 assert (interior < heap_segment_allocated (seg));
17467 uint8_t* o = find_first_object (interior, heap_segment_mem (seg));
17478 gc_heap::find_object_for_relocation (uint8_t* interior, uint8_t* low, uint8_t* high)
17480 uint8_t* old_address = interior;
17481 if (!((old_address >= low) && (old_address < high)))
17484 size_t brick = brick_of (old_address);
17485 int brick_entry = brick_table [ brick ];
17486 if (brick_entry != 0)
17490 while (brick_entry < 0)
17492 brick = (brick + brick_entry);
17493 brick_entry = brick_table [ brick ];
17495 uint8_t* old_loc = old_address;
17496 uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
17498 if (node <= old_loc)
17503 brick_entry = brick_table [ brick ];
17509 //find the object by going along the plug
17511 while (o <= interior)
17513 uint8_t* next_o = o + Align (size (o));
17514 assert (next_o > o);
17515 if (next_o > interior)
17521 assert ((o <= interior) && ((o + Align (size (o))) > interior));
17526 // this is a pointer to a large object
17527 heap_segment* seg = find_segment_per_heap (interior, FALSE);
17530 assert (interior < heap_segment_allocated (seg));
17532 uint8_t* o = heap_segment_mem (seg);
17533 while (o < heap_segment_allocated (seg))
17535 uint8_t* next_o = o + Align (size (o));
17536 assert (next_o > o);
17537 if ((o < interior) && (interior < next_o))
17549 #else //INTERIOR_POINTERS
17551 uint8_t* gc_heap::find_object (uint8_t* o, uint8_t* low)
17555 #endif //INTERIOR_POINTERS
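// find_object and find_object_for_relocation above both lean on the brick table: after
// callers rule out a zero (empty) entry, a positive entry locates the plug tree within
// the brick, while an entry < 0 is a relative back-pointer saying "the tree covering
// this brick starts that many bricks earlier". A standalone sketch of the back-pointer
// chase (the table type here is illustrative):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstddef>

static ptrdiff_t toy_find_tree_brick (const ptrdiff_t* brick_table, ptrdiff_t brick)
{
    ptrdiff_t entry = brick_table[brick];
    while (entry < 0)
    {
        brick += entry;             // entry is a negative relative offset
        entry = brick_table[brick];
    }
    return brick;                   // the brick whose entry locates the tree root
}
#endif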
17557 #ifdef MULTIPLE_HEAPS
17560 #ifdef GC_CONFIG_DRIVEN
17561 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;}}
17562 #else //GC_CONFIG_DRIVEN
17563 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}}
17564 #endif //GC_CONFIG_DRIVEN
17566 #define m_boundary(o) {}
17569 #define m_boundary_fullgc(o) {}
17571 #else //MULTIPLE_HEAPS
17574 #ifdef GC_CONFIG_DRIVEN
17575 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;}
17577 #define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;}
17578 #endif //GC_CONFIG_DRIVEN
17580 #define m_boundary(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
17583 #define m_boundary_fullgc(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
17585 #endif //MULTIPLE_HEAPS
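// m_boundary above appends newly marked objects to the bounded mark list; in the
// GC_CONFIG_DRIVEN flavor the index keeps advancing even when the list is full, so
// (index - end) measures how far the list overflowed. A function-form sketch of that
// flavor (names are illustrative):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstdint>

static void toy_record_boundary (uint8_t* o,
                                 uint8_t**& mark_list_index,
                                 uint8_t** mark_list_end)
{
    if (mark_list_index <= mark_list_end)
        *mark_list_index = o;   // room left: record the object
    mark_list_index++;          // always advance so overflow can be measured
}
#endif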
17587 #define method_table(o) ((CObjectHeader*)(o))->GetMethodTable()
17590 BOOL gc_heap::gc_mark1 (uint8_t* o)
17592 BOOL marked = !marked (o);
17594 dprintf (3, ("*%Ix*, newly marked: %d", (size_t)o, marked));
17599 BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high)
17601 BOOL marked = FALSE;
17602 if ((o >= low) && (o < high))
17603 marked = gc_mark1 (o);
17604 #ifdef MULTIPLE_HEAPS
17608 gc_heap* hp = heap_of_gc (o);
17610 if ((o >= hp->gc_low) && (o < hp->gc_high))
17611 marked = gc_mark1 (o);
17614 snoop_stat.objects_checked_count++;
17618 snoop_stat.objects_marked_count++;
17622 snoop_stat.zero_ref_count++;
17625 #endif //SNOOP_STATS
17626 #endif //MULTIPLE_HEAPS
17630 #ifdef BACKGROUND_GC
17633 BOOL gc_heap::background_marked (uint8_t* o)
17635 return mark_array_marked (o);
17638 BOOL gc_heap::background_mark1 (uint8_t* o)
17640 BOOL to_mark = !mark_array_marked (o);
17642 dprintf (3, ("b*%Ix*b(%d)", (size_t)o, (to_mark ? 1 : 0)));
17645 mark_array_set_marked (o);
17646 dprintf (4, ("n*%Ix*n", (size_t)o));
17653 // TODO: we could consider filtering out NULLs here instead of going to
17654 // look for them on other heaps
17656 BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high)
17658 BOOL marked = FALSE;
17659 if ((o >= low) && (o < high))
17660 marked = background_mark1 (o);
17661 #ifdef MULTIPLE_HEAPS
17665 gc_heap* hp = heap_of (o);
17667 if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address))
17668 marked = background_mark1 (o);
17670 #endif //MULTIPLE_HEAPS
17674 #endif //BACKGROUND_GC
17677 uint8_t* gc_heap::next_end (heap_segment* seg, uint8_t* f)
17679 if (seg == ephemeral_heap_segment)
17682 return heap_segment_allocated (seg);
17685 #define new_start() {if (ppstop <= start) {break;} else {parm = start;}}
17686 #define ignore_start 0
17687 #define use_start 1
17689 #define go_through_object(mt,o,size,parm,start,start_useful,limit,exp) \
17691 CGCDesc* map = CGCDesc::GetCGCDescFromMT((MethodTable*)(mt)); \
17692 CGCDescSeries* cur = map->GetHighestSeries(); \
17693 ptrdiff_t cnt = (ptrdiff_t) map->GetNumSeries(); \
17697 CGCDescSeries* last = map->GetLowestSeries(); \
17698 uint8_t** parm = 0; \
17701 assert (parm <= (uint8_t**)((o) + cur->GetSeriesOffset())); \
17702 parm = (uint8_t**)((o) + cur->GetSeriesOffset()); \
17703 uint8_t** ppstop = \
17704 (uint8_t**)((uint8_t*)parm + cur->GetSeriesSize() + (size));\
17705 if (!start_useful || (uint8_t*)ppstop > (start)) \
17707 if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);\
17708 while (parm < ppstop) \
17716 } while (cur >= last); \
17720 /* Handle the repeating case - array of valuetypes */ \
17721 uint8_t** parm = (uint8_t**)((o) + cur->startoffset); \
17722 if (start_useful && start > (uint8_t*)parm) \
17724 ptrdiff_t cs = mt->RawGetComponentSize(); \
17725 parm = (uint8_t**)((uint8_t*)parm + (((start) - (uint8_t*)parm)/cs)*cs); \
17727 while ((uint8_t*)parm < ((o)+(size)-plug_skew)) \
17729 for (ptrdiff_t __i = 0; __i > cnt; __i--) \
17731 HALF_SIZE_T skip = cur->val_serie[__i].skip; \
17732 HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs; \
17733 uint8_t** ppstop = parm + nptrs; \
17734 if (!start_useful || (uint8_t*)ppstop > (start)) \
17736 if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start); \
17741 } while (parm < ppstop); \
17743 parm = (uint8_t**)((uint8_t*)ppstop + skip); \
17749 #define go_through_object_nostart(mt,o,size,parm,exp) {go_through_object(mt,o,size,parm,o,ignore_start,(o + size),exp); }
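// go_through_object above walks an object's reference fields by replaying its
// CGCDesc series: each series contributes a [parm, ppstop) run of pointer-sized
// slots, and the expression argument runs once per slot. A simplified standalone
// sketch of the non-repeating case (toy_series uses plain byte counts, whereas real
// CGCDescSeries sizes are biased by the object size):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstdint>
#include <cstddef>

struct toy_series { size_t offset; size_t byte_count; };

template <typename F>
static void toy_for_each_slot (uint8_t* obj, const toy_series* series, int count, F visit)
{
    for (int i = 0; i < count; i++)
    {
        uint8_t** parm   = (uint8_t**)(obj + series[i].offset);
        uint8_t** ppstop = (uint8_t**)((uint8_t*)parm + series[i].byte_count);
        while (parm < ppstop)
        {
            visit (parm);   // e.g. mark *parm, as the exp argument does above
            parm++;
        }
    }
}
#endif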
17751 // One thing to note about this macro:
17752 // you can use *parm safely, but in general you don't want to use parm itself,
17753 // because for collectible types it's not an address on the managed heap.
17754 #ifndef COLLECTIBLE_CLASS
17755 #define go_through_object_cl(mt,o,size,parm,exp) \
17757 if (header(o)->ContainsPointers()) \
17759 go_through_object_nostart(mt,o,size,parm,exp); \
17762 #else //COLLECTIBLE_CLASS
17763 #define go_through_object_cl(mt,o,size,parm,exp) \
17765 if (header(o)->Collectible()) \
17767 uint8_t* class_obj = get_class_object (o); \
17768 uint8_t** parm = &class_obj; \
17769 do {exp} while (false); \
17771 if (header(o)->ContainsPointers()) \
17773 go_through_object_nostart(mt,o,size,parm,exp); \
17776 #endif //COLLECTIBLE_CLASS
17778 // This starts a plug. But mark_stack_tos isn't increased until set_pinned_info is called.
17779 void gc_heap::enque_pinned_plug (uint8_t* plug,
17780 BOOL save_pre_plug_info_p,
17781 uint8_t* last_object_in_last_plug)
17783 if (mark_stack_array_length <= mark_stack_tos)
17785 if (!grow_mark_stack (mark_stack_array, mark_stack_array_length, MARK_STACK_INITIAL_LENGTH))
17787 // we don't want to continue here due to security
17788 // risks. This happens very rarely, and fixing it in a
17789 // way that would let us continue is fairly involved and will
17790 // not be done in Dev10.
17791 GCToEEInterface::HandleFatalError(CORINFO_EXCEPTION_GC);
17795 dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d",
17796 mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0)));
17797 mark& m = mark_stack_array[mark_stack_tos];
17799 // Must be set now because if we have a short object we'll need the value of saved_pre_p.
17800 m.saved_pre_p = save_pre_plug_info_p;
17802 if (save_pre_plug_info_p)
17805 BOOL is_padded = is_plug_padded (last_object_in_last_plug);
17807 clear_plug_padded (last_object_in_last_plug);
17808 #endif //SHORT_PLUGS
17809 memcpy (&(m.saved_pre_plug), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
17812 set_plug_padded (last_object_in_last_plug);
17813 #endif //SHORT_PLUGS
17815 memcpy (&(m.saved_pre_plug_reloc), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
17817 // If the last object in the last plug is too short, it requires special handling.
17818 size_t last_obj_size = plug - last_object_in_last_plug;
17819 if (last_obj_size < min_pre_pin_obj_size)
17821 record_interesting_data_point (idp_pre_short);
17824 record_interesting_data_point (idp_pre_short_padded);
17825 #endif //SHORT_PLUGS
17826 dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!",
17827 last_object_in_last_plug, plug));
17828 // Need to set the short bit regardless of whether the object has refs, because we
17829 // need to indicate that this object is not walkable.
17832 #ifdef COLLECTIBLE_CLASS
17833 if (is_collectible (last_object_in_last_plug))
17835 m.set_pre_short_collectible();
17837 #endif //COLLECTIBLE_CLASS
17839 if (contain_pointers (last_object_in_last_plug))
17841 dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));
17843 go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
17845 size_t gap_offset = (((size_t)pval - (size_t)(plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
17846 dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
17847 m.set_pre_short_bit (gap_offset);
17854 m.saved_post_p = FALSE;
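// The memcpy calls above snapshot the plug_and_gap pair that sits immediately before
// the pinned plug, because the plan phase will overwrite that gap with relocation
// info; the saved copy is what gets walked and restored later. A standalone sketch of
// the save/restore pair (toy_gap_pair stands in for gap_reloc_pair):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstring>
#include <cstdint>

struct toy_gap_pair { uint8_t bytes[2 * sizeof (void*)]; };

static void toy_save_pre_plug (toy_gap_pair& saved, uint8_t* plug)
{
    // copy the pair that lives just below the plug's start
    memcpy (&saved, plug - sizeof (toy_gap_pair), sizeof (toy_gap_pair));
}

static void toy_restore_pre_plug (const toy_gap_pair& saved, uint8_t* plug)
{
    memcpy (plug - sizeof (toy_gap_pair), &saved, sizeof (toy_gap_pair));
}
#endif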
17857 void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
17859 UNREFERENCED_PARAMETER(last_pinned_plug);
17861 mark& m = mark_stack_array[mark_stack_tos - 1];
17862 assert (last_pinned_plug == m.first);
17863 m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]);
17866 BOOL is_padded = is_plug_padded (last_object_in_last_plug);
17868 clear_plug_padded (last_object_in_last_plug);
17869 #endif //SHORT_PLUGS
17870 memcpy (&(m.saved_post_plug), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
17873 set_plug_padded (last_object_in_last_plug);
17874 #endif //SHORT_PLUGS
17876 memcpy (&(m.saved_post_plug_reloc), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
17878 // This is important - we need to clear all bits here except the last one.
17879 m.saved_post_p = TRUE;
17882 m.saved_post_plug_debug.gap = 1;
17885 dprintf (3, ("PP %Ix has NP %Ix right after", last_pinned_plug, post_plug));
17887 size_t last_obj_size = post_plug - last_object_in_last_plug;
17888 if (last_obj_size < min_pre_pin_obj_size)
17890 dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug));
17891 record_interesting_data_point (idp_post_short);
17894 record_interesting_data_point (idp_post_short_padded);
17895 #endif //SHORT_PLUGS
17896 m.set_post_short();
17897 #if defined (_DEBUG) && defined (VERIFY_HEAP)
17898 verify_pinned_queue_p = TRUE;
17899 #endif // _DEBUG && VERIFY_HEAP
17901 #ifdef COLLECTIBLE_CLASS
17902 if (is_collectible (last_object_in_last_plug))
17904 m.set_post_short_collectible();
17906 #endif //COLLECTIBLE_CLASS
17908 if (contain_pointers (last_object_in_last_plug))
17910 dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));
17912 // TODO: since we won't be able to walk this object in relocation, we still need to
17913 // take care of collectible assemblies here.
17914 go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
17916 size_t gap_offset = (((size_t)pval - (size_t)(post_plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
17917 dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
17918 m.set_post_short_bit (gap_offset);
17927 __declspec(naked) void __fastcall Prefetch(void* addr)
17935 inline void Prefetch (void* addr)
17937 UNREFERENCED_PARAMETER(addr);
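// Between the naked x86 version and the empty fallback above, a portable middle
// ground is the compiler builtin, which is a pure hint and never faults. A sketch
// (assumes GCC/Clang; MSVC would use _mm_prefetch instead):
#if 0 // illustrative sketch only; not part of the GC build
inline void PrefetchSketch (void* addr)
{
#if defined(__GNUC__) || defined(__clang__)
    __builtin_prefetch (addr, 0 /* for read */, 3 /* high temporal locality */);
#else
    (void)addr; // no-op where no builtin is known
#endif
}
#endif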
17942 VOLATILE(uint8_t*)& gc_heap::ref_mark_stack (gc_heap* hp, int index)
17944 return ((VOLATILE(uint8_t*)*)(hp->mark_stack_array))[index];
17947 #endif //MH_SC_MARK
17951 #define partial_object 3
17953 uint8_t* ref_from_slot (uint8_t* r)
17955 return (uint8_t*)((size_t)r & ~(stolen | partial));
17958 BOOL stolen_p (uint8_t* r)
17960 return (((size_t)r&2) && !((size_t)r&1));
17963 BOOL ready_p (uint8_t* r)
17965 return ((size_t)r != 1);
17968 BOOL partial_p (uint8_t* r)
17970 return (((size_t)r&1) && !((size_t)r&2));
17973 BOOL straight_ref_p (uint8_t* r)
17975 return (!stolen_p (r) && !partial_p (r));
17978 BOOL partial_object_p (uint8_t* r)
17980 return (((size_t)r & partial_object) == partial_object);
17983 BOOL ref_p (uint8_t* r)
17985 return (straight_ref_p (r) || partial_object_p (r));
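// The predicates above all key off the low two bits of a mark-stack slot; since real
// object references are at least pointer-aligned, those bits are free for tagging:
// ..01 = partial (a resumption point inside a big object), ..10 = stolen (another
// heap took the entry), ..11 = partial_object (the parent of a partial entry). A
// standalone sketch exercising the same encoding (the address is hypothetical):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstdint>
#include <cstddef>
#include <cassert>

static void toy_tag_bits()
{
    const size_t toy_stolen = 2, toy_partial = 1, toy_partial_object = 3;

    uint8_t* o = (uint8_t*)0x1000; // hypothetical aligned object address
    uint8_t* partial_ref = (uint8_t*)((size_t)o | toy_partial);

    assert (((size_t)partial_ref & 1) && !((size_t)partial_ref & 2));            // partial_p
    assert ((uint8_t*)((size_t)partial_ref & ~(toy_stolen | toy_partial)) == o); // ref_from_slot

    uint8_t* parent_ref = (uint8_t*)((size_t)o | toy_partial_object);
    assert (((size_t)parent_ref & toy_partial_object) == toy_partial_object);    // partial_object_p
}
#endif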
17988 void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL)
17990 SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_tos = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)mark_stack_array;
17991 SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_limit = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)&mark_stack_array[mark_stack_array_length];
17992 SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_base = mark_stack_tos;
17993 #ifdef SORT_MARK_STACK
17994 SERVER_SC_MARK_VOLATILE(uint8_t*)* sorted_tos = mark_stack_base;
17995 #endif //SORT_MARK_STACK
17997 // If we are doing a full GC we don't use the mark list anyway, so use m_boundary_fullgc,
17998 // which doesn't update the mark list.
17999 BOOL full_p = (settings.condemned_generation == max_generation);
18001 assert ((start >= oo) && (start < oo+size(oo)));
18004 *mark_stack_tos = oo;
18005 #endif //!MH_SC_MARK
18009 #ifdef MULTIPLE_HEAPS
18010 #else //MULTIPLE_HEAPS
18011 const int thread = 0;
18012 #endif //MULTIPLE_HEAPS
18014 if (oo && ((size_t)oo != 4))
18022 else if (!partial_p (oo) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
18024 BOOL overflow_p = FALSE;
18026 if (mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1))
18028 size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18029 if (mark_stack_tos + CGCDesc::GetNumPointers(method_table(oo), s, num_components) >= (mark_stack_limit - 1))
18035 if (overflow_p == FALSE)
18037 dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18039 go_through_object_cl (method_table(oo), oo, s, ppslot,
18041 uint8_t* o = *ppslot;
18043 if (gc_mark (o, gc_low, gc_high))
18047 m_boundary_fullgc (o);
18053 size_t obj_size = size (o);
18054 promoted_bytes (thread) += obj_size;
18055 if (contain_pointers_or_collectible (o))
18057 *(mark_stack_tos++) = o;
18065 dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
18066 min_overflow_address = min (min_overflow_address, oo);
18067 max_overflow_address = max (max_overflow_address, oo);
18072 if (partial_p (oo))
18074 start = ref_from_slot (oo);
18075 oo = ref_from_slot (*(--mark_stack_tos));
18076 dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
18077 assert ((oo < start) && (start < (oo + size (oo))));
18079 #ifdef COLLECTIBLE_CLASS
18082 // If there's a class object, push it now. We are guaranteed to have the slot since
18083 // we just popped one object off.
18084 if (is_collectible (oo))
18086 uint8_t* class_obj = get_class_object (oo);
18087 if (gc_mark (class_obj, gc_low, gc_high))
18091 m_boundary_fullgc (class_obj);
18095 m_boundary (class_obj);
18098 size_t obj_size = size (class_obj);
18099 promoted_bytes (thread) += obj_size;
18100 *(mark_stack_tos++) = class_obj;
18101 // The code below expects that oo is still stored in the stack slot that was
18102 // just popped, and it "pushes" it back just by incrementing mark_stack_tos.
18103 // But class_obj has just overwritten that stack slot, so oo needs to be
18104 // stored to the new slot that mark_stack_tos now points to.
18105 *mark_stack_tos = oo;
18109 if (!contain_pointers (oo))
18114 #endif //COLLECTIBLE_CLASS
18118 BOOL overflow_p = FALSE;
18120 if (mark_stack_tos + (num_partial_refs + 2) >= mark_stack_limit)
18124 if (overflow_p == FALSE)
18126 dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18128 //push the object and its current
18129 SERVER_SC_MARK_VOLATILE(uint8_t*)* place = ++mark_stack_tos;
18133 *(place) = (uint8_t*)partial;
18134 #endif //MH_SC_MARK
18135 int i = num_partial_refs;
18136 uint8_t* ref_to_continue = 0;
18138 go_through_object (method_table(oo), oo, s, ppslot,
18139 start, use_start, (oo + s),
18141 uint8_t* o = *ppslot;
18143 if (gc_mark (o, gc_low, gc_high))
18147 m_boundary_fullgc (o);
18153 size_t obj_size = size (o);
18154 promoted_bytes (thread) += obj_size;
18155 if (contain_pointers_or_collectible (o))
18157 *(mark_stack_tos++) = o;
18160 ref_to_continue = (uint8_t*)((size_t)(ppslot+1) | partial);
18169 //we are finished with this object
18170 assert (ref_to_continue == 0);
18172 assert ((*(place-1)) == (uint8_t*)0);
18175 #endif //MH_SC_MARK
18177 // shouldn't we decrease tos by 2 here??
18180 if (ref_to_continue)
18184 assert ((*(place-1)) == (uint8_t*)0);
18185 *(place-1) = (uint8_t*)((size_t)oo | partial_object);
18186 assert (((*place) == (uint8_t*)1) || ((*place) == (uint8_t*)2));
18187 #endif //MH_SC_MARK
18188 *place = ref_to_continue;
18193 dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
18194 min_overflow_address = min (min_overflow_address, oo);
18195 max_overflow_address = max (max_overflow_address, oo);
18198 #ifdef SORT_MARK_STACK
18199 if (mark_stack_tos > sorted_tos + mark_stack_array_length/8)
18201 rqsort1 (sorted_tos, mark_stack_tos-1);
18202 sorted_tos = mark_stack_tos-1;
18204 #endif //SORT_MARK_STACK
18207 if (!(mark_stack_empty_p()))
18209 oo = *(--mark_stack_tos);
18212 #ifdef SORT_MARK_STACK
18213 sorted_tos = min ((size_t)sorted_tos, (size_t)mark_stack_tos);
18214 #endif //SORT_MARK_STACK
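// Before pushing a small object's references, mark_object_simple1 above runs a
// two-stage "will the children fit" test: first a cheap over-estimate (the object
// size divided by the pointer size bounds its slot count), and only if that fails,
// the exact pointer count from the type's GCDesc. A standalone sketch of that test:
#if 0 // illustrative sketch only; not part of the GC build
#include <cstddef>

static bool toy_children_fit (size_t obj_size,
                              size_t exact_pointer_count, // from the type's GCDesc
                              size_t slots_left)
{
    if (obj_size / sizeof (void*) < slots_left)
        return true;                          // the cheap bound already fits
    return exact_pointer_count < slots_left;  // fall back to the exact count
}
#endif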
18222 BOOL same_numa_node_p (int hn1, int hn2)
18224 return (heap_select::find_numa_node_from_heap_no (hn1) == heap_select::find_numa_node_from_heap_no (hn2));
18227 int find_next_buddy_heap (int this_heap_number, int current_buddy, int n_heaps)
18229 int hn = (current_buddy+1)%n_heaps;
18230 while (hn != current_buddy)
18232 if ((this_heap_number != hn) && (same_numa_node_p (this_heap_number, hn)))
18234 hn = (hn+1)%n_heaps;
18236 return current_buddy;
18240 gc_heap::mark_steal()
18242 mark_stack_busy() = 0;
18243 //clear the mark stack in the snooping range
18244 for (int i = 0; i < max_snoop_level; i++)
18246 ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
18249 //pick the next heap as our buddy
18250 int thpn = find_next_buddy_heap (heap_number, heap_number, n_heaps);
18253 dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps));
18254 uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
18255 #endif //SNOOP_STATS
18257 int idle_loop_count = 0;
18258 int first_not_ready_level = 0;
18262 gc_heap* hp = g_heaps [thpn];
18263 int level = first_not_ready_level;
18264 first_not_ready_level = 0;
18266 while (check_next_mark_stack (hp) && (level < (max_snoop_level-1)))
18268 idle_loop_count = 0;
18270 snoop_stat.busy_count++;
18271 dprintf (SNOOP_LOG, ("heap%d: looking at next heap level %d stack contents: %Ix",
18272 heap_number, level, (int)((uint8_t**)(hp->mark_stack_array))[level]));
18273 #endif //SNOOP_STATS
18275 uint8_t* o = ref_mark_stack (hp, level);
18277 uint8_t* start = o;
18280 mark_stack_busy() = 1;
18282 BOOL success = TRUE;
18283 uint8_t* next = (ref_mark_stack (hp, level+1));
18286 if (((size_t)o > 4) && !partial_object_p (o))
18288 //this is a normal object, not a partial mark tuple
18289 //success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), 0, o)==o);
18290 success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), (uint8_t*)4, o)==o);
18292 snoop_stat.interlocked_count++;
18294 snoop_stat.normal_count++;
18295 #endif //SNOOP_STATS
18299 //it is a stolen entry, or the beginning/end of a partial mark
18302 snoop_stat.stolen_or_pm_count++;
18303 #endif //SNOOP_STATS
18307 else if (stolen_p (next))
18309 //ignore the stolen guy and go to the next level
18313 snoop_stat.stolen_entry_count++;
18314 #endif //SNOOP_STATS
18318 assert (partial_p (next));
18319 start = ref_from_slot (next);
18320 //re-read the object
18321 o = ref_from_slot (ref_mark_stack (hp, level));
18325 success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level+1), (uint8_t*)stolen, next)==next);
18327 snoop_stat.interlocked_count++;
18330 snoop_stat.partial_mark_parent_count++;
18332 #endif //SNOOP_STATS
18336 // stack is not ready, or o is completely different from the last time we read from this stack level.
18337 // go up 2 levels to steal children or totally unrelated objects.
18339 if (first_not_ready_level == 0)
18341 first_not_ready_level = level;
18345 snoop_stat.pm_not_ready_count++;
18346 #endif //SNOOP_STATS
18353 dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms",
18354 heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
18355 (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
18356 uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
18357 #endif //SNOOP_STATS
18359 mark_object_simple1 (o, start, heap_number);
18362 dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms",
18363 heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
18364 (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
18365 #endif //SNOOP_STATS
18367 mark_stack_busy() = 0;
18369 //clear the mark stack in snooping range
18370 for (int i = 0; i < max_snoop_level; i++)
18372 if (((uint8_t**)mark_stack_array)[i] != 0)
18374 ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
18376 snoop_stat.stack_bottom_clear_count++;
18377 #endif //SNOOP_STATS
18383 mark_stack_busy() = 0;
18387 //slot is either partial or stolen
18391 if ((first_not_ready_level != 0) && hp->mark_stack_busy())
18395 if (!hp->mark_stack_busy())
18397 first_not_ready_level = 0;
18400 if ((idle_loop_count % 6) == 1)
18403 snoop_stat.switch_to_thread_count++;
18404 #endif //SNOOP_STATS
18405 GCToOSInterface::Sleep(1);
18407 int free_count = 1;
18409 snoop_stat.stack_idle_count++;
18410 //dprintf (SNOOP_LOG, ("heap%d: counting idle threads", heap_number));
18411 #endif //SNOOP_STATS
18412 for (int hpn = (heap_number+1)%n_heaps; hpn != heap_number;)
18414 if (!((g_heaps [hpn])->mark_stack_busy()))
18418 dprintf (SNOOP_LOG, ("heap%d: %d idle", heap_number, free_count));
18419 #endif //SNOOP_STATS
18421 else if (same_numa_node_p (hpn, heap_number) || ((idle_loop_count % 1000) == 999))
18426 hpn = (hpn+1)%n_heaps;
18429 if (free_count == n_heaps)
18438 BOOL gc_heap::check_next_mark_stack (gc_heap* next_heap)
18441 snoop_stat.check_level_count++;
18442 #endif //SNOOP_STATS
18443 return (next_heap->mark_stack_busy()>=1);
18445 #endif //MH_SC_MARK
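// The heart of mark_steal above is the compare-exchange that swaps the victim's slot
// for the sentinel (uint8_t*)4: whoever wins the exchange owns the object, and the
// victim sees the sentinel and skips the slot. A standalone sketch using std::atomic
// in place of Interlocked::CompareExchangePointer:
#if 0 // illustrative sketch only; not part of the GC build
#include <atomic>
#include <cstdint>
#include <cstddef>

static uint8_t* toy_try_steal_slot (std::atomic<uint8_t*>& slot)
{
    uint8_t* o = slot.load (std::memory_order_acquire);

    // Skip empty slots, sentinels, and tagged (partial/stolen) entries.
    if (((size_t)o <= 4) || (((size_t)o & 3) != 0))
        return nullptr;

    uint8_t* expected = o;
    if (slot.compare_exchange_strong (expected, (uint8_t*)4))
        return o;       // stolen: the caller now marks through o
    return nullptr;     // lost the race to the owner or another thief
}
#endif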
18448 void gc_heap::print_snoop_stat()
18450 dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s",
18451 "heap", "check", "zero", "mark", "stole", "pstack", "nstack", "nonsk"));
18452 dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d",
18453 snoop_stat.heap_index,
18454 snoop_stat.objects_checked_count,
18455 snoop_stat.zero_ref_count,
18456 snoop_stat.objects_marked_count,
18457 snoop_stat.stolen_stack_count,
18458 snoop_stat.partial_stack_count,
18459 snoop_stat.normal_stack_count,
18460 snoop_stat.non_stack_count));
18461 dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s",
18462 "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "clear"));
18463 dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
18464 snoop_stat.heap_index,
18465 snoop_stat.check_level_count,
18466 snoop_stat.busy_count,
18467 snoop_stat.interlocked_count,
18468 snoop_stat.partial_mark_parent_count,
18469 snoop_stat.stolen_or_pm_count,
18470 snoop_stat.stolen_entry_count,
18471 snoop_stat.pm_not_ready_count,
18472 snoop_stat.normal_count,
18473 snoop_stat.stack_bottom_clear_count));
18475 printf ("\n%4s | %8s | %8s | %8s | %8s | %8s\n",
18476 "heap", "check", "zero", "mark", "idle", "switch");
18477 printf ("%4d | %8d | %8d | %8d | %8d | %8d\n",
18478 snoop_stat.heap_index,
18479 snoop_stat.objects_checked_count,
18480 snoop_stat.zero_ref_count,
18481 snoop_stat.objects_marked_count,
18482 snoop_stat.stack_idle_count,
18483 snoop_stat.switch_to_thread_count);
18484 printf ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n",
18485 "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
18486 printf ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
18487 snoop_stat.heap_index,
18488 snoop_stat.check_level_count,
18489 snoop_stat.busy_count,
18490 snoop_stat.interlocked_count,
18491 snoop_stat.partial_mark_parent_count,
18492 snoop_stat.stolen_or_pm_count,
18493 snoop_stat.stolen_entry_count,
18494 snoop_stat.pm_not_ready_count,
18495 snoop_stat.normal_count,
18496 snoop_stat.stack_bottom_clear_count);
18498 #endif //SNOOP_STATS
18500 #ifdef HEAP_ANALYZE
18502 gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
18504 if (!internal_root_array)
18506 internal_root_array = new (nothrow) uint8_t* [internal_root_array_length];
18507 if (!internal_root_array)
18509 heap_analyze_success = FALSE;
18513 if (heap_analyze_success && (internal_root_array_length <= internal_root_array_index))
18515 size_t new_size = 2*internal_root_array_length;
18517 uint64_t available_physical = 0;
18518 get_memory_info (NULL, &available_physical);
18519 if (new_size > (size_t)(available_physical / 10))
18521 heap_analyze_success = FALSE;
18525 uint8_t** tmp = new (nothrow) uint8_t* [new_size];
18528 memcpy (tmp, internal_root_array,
18529 internal_root_array_length*sizeof (uint8_t*));
18530 delete[] internal_root_array;
18531 internal_root_array = tmp;
18532 internal_root_array_length = new_size;
18536 heap_analyze_success = FALSE;
18541 if (heap_analyze_success)
18543 PREFIX_ASSUME(internal_root_array_index < internal_root_array_length);
18545 uint8_t* ref = (uint8_t*)po;
18546 if (!current_obj ||
18547 !((ref >= current_obj) && (ref < (current_obj + current_obj_size))))
18549 gc_heap* hp = gc_heap::heap_of (ref);
18550 current_obj = hp->find_object (ref, hp->lowest_address);
18551 current_obj_size = size (current_obj);
18553 internal_root_array[internal_root_array_index] = current_obj;
18554 internal_root_array_index++;
18558 mark_object_simple (po THREAD_NUMBER_ARG);
18560 #endif //HEAP_ANALYZE
18562 //this method assumes that *po is in the [low, high[ range
18564 gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
18567 #ifdef MULTIPLE_HEAPS
18568 #else //MULTIPLE_HEAPS
18569 const int thread = 0;
18570 #endif //MULTIPLE_HEAPS
18573 snoop_stat.objects_checked_count++;
18574 #endif //SNOOP_STATS
18579 size_t s = size (o);
18580 promoted_bytes (thread) += s;
18582 go_through_object_cl (method_table(o), o, s, poo,
18584 uint8_t* oo = *poo;
18585 if (gc_mark (oo, gc_low, gc_high))
18588 size_t obj_size = size (oo);
18589 promoted_bytes (thread) += obj_size;
18591 if (contain_pointers_or_collectible (oo))
18592 mark_object_simple1 (oo, oo THREAD_NUMBER_ARG);
18602 uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
18604 if ((o >= gc_low) && (o < gc_high))
18605 mark_object_simple (&o THREAD_NUMBER_ARG);
18606 #ifdef MULTIPLE_HEAPS
18610 gc_heap* hp = heap_of (o);
18612 if ((o >= hp->gc_low) && (o < hp->gc_high))
18613 mark_object_simple (&o THREAD_NUMBER_ARG);
18615 #endif //MULTIPLE_HEAPS
18620 #ifdef BACKGROUND_GC
18622 void gc_heap::background_mark_simple1 (uint8_t* oo THREAD_NUMBER_DCL)
18624 uint8_t** mark_stack_limit = &background_mark_stack_array[background_mark_stack_array_length];
18626 #ifdef SORT_MARK_STACK
18627 uint8_t** sorted_tos = background_mark_stack_array;
18628 #endif //SORT_MARK_STACK
18630 background_mark_stack_tos = background_mark_stack_array;
18634 #ifdef MULTIPLE_HEAPS
18635 #else //MULTIPLE_HEAPS
18636 const int thread = 0;
18637 #endif //MULTIPLE_HEAPS
18641 if ((((size_t)oo & 1) == 0) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
18643 BOOL overflow_p = FALSE;
18645 if (background_mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1))
18647 size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18648 size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
18649 if (background_mark_stack_tos + num_pointers >= (mark_stack_limit - 1))
18651 dprintf (2, ("h%d: %Id left, obj (mt: %Ix) %Id ptrs",
18653 (size_t)(mark_stack_limit - 1 - background_mark_stack_tos),
18657 bgc_overflow_count++;
18662 if (overflow_p == FALSE)
18664 dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18666 go_through_object_cl (method_table(oo), oo, s, ppslot,
18668 uint8_t* o = *ppslot;
18670 if (background_mark (o,
18671 background_saved_lowest_address,
18672 background_saved_highest_address))
18675 size_t obj_size = size (o);
18676 bpromoted_bytes (thread) += obj_size;
18677 if (contain_pointers_or_collectible (o))
18679 *(background_mark_stack_tos++) = o;
18688 dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
18689 background_min_overflow_address = min (background_min_overflow_address, oo);
18690 background_max_overflow_address = max (background_max_overflow_address, oo);
18695 uint8_t* start = oo;
18696 if ((size_t)oo & 1)
18698 oo = (uint8_t*)((size_t)oo & ~1);
18699 start = *(--background_mark_stack_tos);
18700 dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
18702 #ifdef COLLECTIBLE_CLASS
18705 // If there's a class object, push it now. We are guaranteed to have the slot since
18706 // we just popped one object off.
18707 if (is_collectible (oo))
18709 uint8_t* class_obj = get_class_object (oo);
18710 if (background_mark (class_obj,
18711 background_saved_lowest_address,
18712 background_saved_highest_address))
18714 size_t obj_size = size (class_obj);
18715 bpromoted_bytes (thread) += obj_size;
18717 *(background_mark_stack_tos++) = class_obj;
18721 if (!contain_pointers (oo))
18726 #endif //COLLECTIBLE_CLASS
18730 BOOL overflow_p = FALSE;
18732 if (background_mark_stack_tos + (num_partial_refs + 2) >= mark_stack_limit)
18734 size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
18735 size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
18737 dprintf (2, ("h%d: PM: %Id left, obj %Ix (mt: %Ix) start: %Ix, total: %Id",
18739 (size_t)(mark_stack_limit - background_mark_stack_tos),
18745 bgc_overflow_count++;
18748 if (overflow_p == FALSE)
18750 dprintf(3,("pushing mark for %Ix ", (size_t)oo));
18752 //push the object and its current
18753 uint8_t** place = background_mark_stack_tos++;
18755 *(background_mark_stack_tos++) = (uint8_t*)((size_t)oo | 1);
18757 int i = num_partial_refs;
18759 go_through_object (method_table(oo), oo, s, ppslot,
18760 start, use_start, (oo + s),
18762 uint8_t* o = *ppslot;
18765 if (background_mark (o,
18766 background_saved_lowest_address,
18767 background_saved_highest_address))
18770 size_t obj_size = size (o);
18771 bpromoted_bytes (thread) += obj_size;
18772 if (contain_pointers_or_collectible (o))
18774 *(background_mark_stack_tos++) = o;
18778 *place = (uint8_t*)(ppslot+1);
18787 //we are finished with this object
18795 dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
18796 background_min_overflow_address = min (background_min_overflow_address, oo);
18797 background_max_overflow_address = max (background_max_overflow_address, oo);
18801 #ifdef SORT_MARK_STACK
18802 if (background_mark_stack_tos > sorted_tos + mark_stack_array_length/8)
18804 rqsort1 (sorted_tos, background_mark_stack_tos-1);
18805 sorted_tos = background_mark_stack_tos-1;
18807 #endif //SORT_MARK_STACK
18809 #ifdef COLLECTIBLE_CLASS
18811 #endif // COLLECTIBLE_CLASS
18814 if (!(background_mark_stack_tos == background_mark_stack_array))
18816 oo = *(--background_mark_stack_tos);
18818 #ifdef SORT_MARK_STACK
18819 sorted_tos = (uint8_t**)min ((size_t)sorted_tos, (size_t)background_mark_stack_tos);
18820 #endif //SORT_MARK_STACK
18826 assert (background_mark_stack_tos == background_mark_stack_array);
18831 //this version is different from the foreground GC because
18832 //it can't keep pointers to the inside of an object
18833 //while calling background_mark_simple1. The object could be moved
18834 //by an intervening foreground gc.
18835 //this method assumes that o is in the [low, high[ range
18837 gc_heap::background_mark_simple (uint8_t* o THREAD_NUMBER_DCL)
18839 #ifdef MULTIPLE_HEAPS
18840 #else //MULTIPLE_HEAPS
18841 const int thread = 0;
18842 #endif //MULTIPLE_HEAPS
18844 dprintf (3, ("bmarking %Ix", o));
18846 if (background_mark1 (o))
18849 size_t s = size (o);
18850 bpromoted_bytes (thread) += s;
18852 if (contain_pointers_or_collectible (o))
18854 background_mark_simple1 (o THREAD_NUMBER_ARG);
18861 uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL)
18863 if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
18865 background_mark_simple (o THREAD_NUMBER_ARG);
18871 dprintf (3, ("or-%Ix", o));
18877 void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
18879 UNREFERENCED_PARAMETER(sc);
18881 assert (settings.concurrent);
18882 uint8_t* o = (uint8_t*)object;
18884 gc_heap* hp = gc_heap::heap_of (o);
18885 #ifdef INTERIOR_POINTERS
18886 if (flags & GC_CALL_INTERIOR)
18888 o = hp->find_object (o, background_saved_lowest_address);
18890 #endif //INTERIOR_POINTERS
18892 if (!background_object_marked (o, FALSE))
18898 void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags)
18900 UNREFERENCED_PARAMETER(sc);
18901 //in order to save space on the array, mark the object,
18902 //knowing that it will be visited later
18903 assert (settings.concurrent);
18905 THREAD_NUMBER_FROM_CONTEXT;
18906 #ifndef MULTIPLE_HEAPS
18907 const int thread = 0;
18908 #endif //!MULTIPLE_HEAPS
18910 uint8_t* o = (uint8_t*)*ppObject;
18915 #ifdef DEBUG_DestroyedHandleValue
18916 // we can race with destroy handle during concurrent scan
18917 if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
18919 #endif //DEBUG_DestroyedHandleValue
18923 gc_heap* hp = gc_heap::heap_of (o);
18925 if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
18930 #ifdef INTERIOR_POINTERS
18931 if (flags & GC_CALL_INTERIOR)
18933 o = hp->find_object (o, hp->background_saved_lowest_address);
18937 #endif //INTERIOR_POINTERS
18939 #ifdef FEATURE_CONSERVATIVE_GC
18940 // For conservative GC, a value on stack may point to middle of a free object.
18941 // In this case, we don't need to promote the pointer.
18942 if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
18946 #endif //FEATURE_CONSERVATIVE_GC
18949 ((CObjectHeader*)o)->Validate();
18952 dprintf (BGC_LOG, ("Background Promote %Ix", (size_t)o));
18954 //needs to be called before the marking because it is possible for a foreground
18955 //gc to take place during the mark and move the object
18956 STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, " GCHeap::Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
18958 hpt->background_mark_simple (o THREAD_NUMBER_ARG);
18961 //used by the ephemeral collection to scan the local background structures
18962 //containing references.
18964 gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC)
18970 pSC->thread_number = hn;
18972 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
18973 pSC->pCurrentDomain = 0;
18976 BOOL relocate_p = (fn == &GCHeap::Relocate);
18978 dprintf (3, ("Scanning background mark list"));
18981 size_t mark_list_finger = 0;
18982 while (mark_list_finger < c_mark_list_index)
18984 uint8_t** o = &c_mark_list [mark_list_finger];
18987 // We may not be able to calculate the size during relocate as POPO
18988 // may have written over the object.
18989 size_t s = size (*o);
18990 assert (Align (s) >= Align (min_obj_size));
18991 dprintf(3,("background root %Ix", (size_t)*o));
18993 (*fn) ((Object**)o, pSC, 0);
18994 mark_list_finger++;
18997 //scan the mark stack
18998 dprintf (3, ("Scanning background mark stack"));
19000 uint8_t** finger = background_mark_stack_array;
19001 while (finger < background_mark_stack_tos)
19003 if ((finger + 1) < background_mark_stack_tos)
19005 // We need to check for the partial mark case here.
19006 uint8_t* parent_obj = *(finger + 1);
19007 if ((size_t)parent_obj & 1)
19009 uint8_t* place = *finger;
19010 size_t place_offset = 0;
19011 uint8_t* real_parent_obj = (uint8_t*)((size_t)parent_obj & ~1);
19015 *(finger + 1) = real_parent_obj;
19016 place_offset = place - real_parent_obj;
19017 dprintf(3,("relocating background root %Ix", (size_t)real_parent_obj));
19018 (*fn) ((Object**)(finger + 1), pSC, 0);
19019 real_parent_obj = *(finger + 1);
19020 *finger = real_parent_obj + place_offset;
19021 *(finger + 1) = (uint8_t*)((size_t)real_parent_obj | 1);
19022 dprintf(3,("roots changed to %Ix, %Ix", *finger, *(finger + 1)));
19026 uint8_t** temp = &real_parent_obj;
19027 dprintf(3,("marking background root %Ix", (size_t)real_parent_obj));
19028 (*fn) ((Object**)temp, pSC, 0);
19035 dprintf(3,("background root %Ix", (size_t)*finger));
19036 (*fn) ((Object**)finger, pSC, 0);
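// The partial-mark fixup above has to survive relocation: finger[0] points into the
// middle of a parent object whose tagged address sits in finger[1]. The loop untags
// the parent, lets the callback move it, then rebuilds the interior pointer from the
// saved offset and re-tags. A standalone sketch of that dance (relocate stands in for
// the promote_func callback):
#if 0 // illustrative sketch only; not part of the GC build
#include <cstdint>
#include <cstddef>

static void toy_fixup_partial_tuple (uint8_t** finger, uint8_t* (*relocate)(uint8_t*))
{
    uint8_t* place  = finger[0];
    uint8_t* parent = (uint8_t*)((size_t)finger[1] & ~(size_t)1); // untag
    size_t   offset = (size_t)(place - parent);  // interior offset survives the move

    parent = relocate (parent);                  // may move the object
    finger[0] = parent + offset;                 // rebuild the interior pointer
    finger[1] = (uint8_t*)((size_t)parent | 1);  // re-tag as a partial parent
}
#endif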
19042 void gc_heap::background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL)
19044 if (contain_pointers (oo))
19046 size_t total_refs = 0;
19047 size_t s = size (oo);
19048 go_through_object_nostart (method_table(oo), oo, s, po,
19052 background_mark_object (o THREAD_NUMBER_ARG);
19056 dprintf (3,("Background marking through %Ix went through %Id refs",
19062 uint8_t* gc_heap::background_seg_end (heap_segment* seg, BOOL concurrent_p)
19064 if (concurrent_p && (seg == saved_overflow_ephemeral_seg))
19066 // for now we stop at where gen1 started when we started processing
19067 return background_min_soh_overflow_address;
19071 return heap_segment_allocated (seg);
19075 uint8_t* gc_heap::background_first_overflow (uint8_t* min_add,
19078 BOOL small_object_p)
19082 if (small_object_p)
19084 if (in_range_for_segment (min_add, seg))
19086 // min_add was the beginning of gen1 when we did the concurrent
19087 // overflow. Now we could be in a situation where min_add is
19088 // actually the same as allocated for that segment (because
19089 // we expanded the heap), in which case we cannot call
19090 // find_first_object on this address or we will AV.
19091 if (min_add >= heap_segment_allocated (seg))
19097 if (concurrent_p &&
19098 ((seg == saved_overflow_ephemeral_seg) && (min_add >= background_min_soh_overflow_address)))
19100 return background_min_soh_overflow_address;
19104 o = find_first_object (min_add, heap_segment_mem (seg));
19111 o = max (heap_segment_mem (seg), min_add);
19115 void gc_heap::background_process_mark_overflow_internal (int condemned_gen_number,
19116 uint8_t* min_add, uint8_t* max_add,
19121 current_bgc_state = bgc_overflow_soh;
19124 size_t total_marked_objects = 0;
19126 #ifdef MULTIPLE_HEAPS
19127 int thread = heap_number;
19128 #endif //MULTIPLE_HEAPS
19130 exclusive_sync* loh_alloc_lock = 0;
19132 dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
19133 #ifdef MULTIPLE_HEAPS
19134 // We don't have each heap scan all heaps concurrently because we are worried about
19135 // multiple threads calling things like find_first_object.
19136 int h_start = (concurrent_p ? heap_number : 0);
19137 int h_end = (concurrent_p ? (heap_number + 1) : n_heaps);
19138 for (int hi = h_start; hi < h_end; hi++)
19140 gc_heap* hp = (concurrent_p ? this : g_heaps [(heap_number + hi) % n_heaps]);
19146 #endif //MULTIPLE_HEAPS
19147 BOOL small_object_segments = TRUE;
19148 int align_const = get_alignment_constant (small_object_segments);
19149 generation* gen = hp->generation_of (condemned_gen_number);
19150 heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
19151 PREFIX_ASSUME(seg != NULL);
19152 loh_alloc_lock = hp->bgc_alloc_lock;
19154 uint8_t* o = hp->background_first_overflow (min_add,
19157 small_object_segments);
19161 while ((o < hp->background_seg_end (seg, concurrent_p)) && (o <= max_add))
19163 dprintf (3, ("considering %Ix", (size_t)o));
19167 if (concurrent_p && !small_object_segments)
19169 loh_alloc_lock->bgc_mark_set (o);
19171 if (((CObjectHeader*)o)->IsFree())
19173 s = unused_array_size (o);
19185 if (background_object_marked (o, FALSE) && contain_pointers_or_collectible (o))
19187 total_marked_objects++;
19188 go_through_object_cl (method_table(o), o, s, poo,
19189 uint8_t* oo = *poo;
19190 background_mark_object (oo THREAD_NUMBER_ARG);
19194 if (concurrent_p && !small_object_segments)
19196 loh_alloc_lock->bgc_mark_done ();
19199 o = o + Align (s, align_const);
19207 dprintf (2, ("went through overflow objects in segment %Ix (%d) (so far %Id marked)",
19208 heap_segment_mem (seg), (small_object_segments ? 0 : 1), total_marked_objects));
19210 if ((concurrent_p && (seg == hp->saved_overflow_ephemeral_seg)) ||
19211 (seg = heap_segment_next_in_range (seg)) == 0)
19213 if (small_object_segments)
19217 current_bgc_state = bgc_overflow_loh;
19220 dprintf (2, ("h%d: SOH: ov-mo: %Id", heap_number, total_marked_objects));
19221 fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
19222 concurrent_print_time_delta (concurrent_p ? "Cov SOH" : "Nov SOH");
19223 total_marked_objects = 0;
19224 small_object_segments = FALSE;
19225 align_const = get_alignment_constant (small_object_segments);
19226 seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));
19228 PREFIX_ASSUME(seg != NULL);
19230 o = max (heap_segment_mem (seg), min_add);
19235 dprintf (GTC_LOG, ("h%d: LOH: ov-mo: %Id", heap_number, total_marked_objects));
19236 fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
19242 o = hp->background_first_overflow (min_add,
19245 small_object_segments);
19252 BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p)
19254 BOOL grow_mark_array_p = TRUE;
19258 assert (!processed_soh_overflow_p);
19260 if ((background_max_overflow_address != 0) &&
19261 (background_min_overflow_address != MAX_PTR))
19263 // We have overflow to process but we know we can't process the ephemeral generations
19264 // now (we actually could process till the current gen1 start but since we are going to
19265 // make overflow per segment), for now I'll just stop at the saved gen1 start.
19266 saved_overflow_ephemeral_seg = ephemeral_heap_segment;
19267 background_max_soh_overflow_address = heap_segment_reserved (saved_overflow_ephemeral_seg);
19268 background_min_soh_overflow_address = generation_allocation_start (generation_of (max_generation-1));
19273 assert ((saved_overflow_ephemeral_seg == 0) ||
19274 ((background_max_soh_overflow_address != 0) &&
19275 (background_min_soh_overflow_address != MAX_PTR)));
19277 if (!processed_soh_overflow_p)
19279 // if there was no more overflow we just need to process what we didn't process
19280 // on the saved ephemeral segment.
19281 if ((background_max_overflow_address == 0) && (background_min_overflow_address == MAX_PTR))
19283 dprintf (2, ("final processing mark overflow - no more overflow since last time"));
19284 grow_mark_array_p = FALSE;
19287 background_min_overflow_address = min (background_min_overflow_address,
19288 background_min_soh_overflow_address);
19289 background_max_overflow_address = max (background_max_overflow_address,
19290 background_max_soh_overflow_address);
19291 processed_soh_overflow_p = TRUE;
19295 BOOL overflow_p = FALSE;
19297 if ((background_max_overflow_address != 0) ||
19298 (background_min_overflow_address != MAX_PTR))
19302 if (grow_mark_array_p)
19304 // Try to grow the array.
19305 size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length);
19307 if ((new_size * sizeof(mark)) > 100*1024)
19309 size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);
19311 new_size = min(new_max_size, new_size);
19314 if ((background_mark_stack_array_length < new_size) &&
19315 ((new_size - background_mark_stack_array_length) > (background_mark_stack_array_length / 2)))
19317 dprintf (2, ("h%d: ov grow to %Id", heap_number, new_size));
19319 uint8_t** tmp = new (nothrow) uint8_t* [new_size];
19322 delete background_mark_stack_array;
19323 background_mark_stack_array = tmp;
19324 background_mark_stack_array_length = new_size;
19325 background_mark_stack_tos = background_mark_stack_array;
19331 grow_mark_array_p = TRUE;
19334 uint8_t* min_add = background_min_overflow_address;
19335 uint8_t* max_add = background_max_overflow_address;
19337 background_max_overflow_address = 0;
19338 background_min_overflow_address = MAX_PTR;
19340 background_process_mark_overflow_internal (max_generation, min_add, max_add, concurrent_p);
19350 #endif //BACKGROUND_GC
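// Both background_process_mark_overflow above and process_mark_overflow below grow
// the mark stack with the same policy: double the length, cap it at a tenth of the
// total heap once the array passes 100KB, and only reallocate when the gain is at
// least half the current length. A standalone sketch of that sizing decision:
#if 0 // illustrative sketch only; not part of the GC build
#include <cstddef>
#include <algorithm>

static size_t toy_grow_target (size_t current_len, size_t entry_size,
                               size_t initial_len, size_t total_heap_size)
{
    size_t new_size = std::max (initial_len, 2 * current_len);
    if ((new_size * entry_size) > (100 * 1024))
    {
        size_t new_max_size = (total_heap_size / 10) / entry_size;
        new_size = std::min (new_max_size, new_size);
    }
    bool worth_growing = (current_len < new_size) &&
                         ((new_size - current_len) > (current_len / 2));
    return worth_growing ? new_size : current_len; // unchanged means don't realloc
}
#endif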
19353 void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL)
19355 #ifndef COLLECTIBLE_CLASS
19356 UNREFERENCED_PARAMETER(mark_class_object_p);
19357 BOOL to_mark_class_object = FALSE;
19358 #else //COLLECTIBLE_CLASS
19359 BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo)));
19360 #endif //COLLECTIBLE_CLASS
19361 if (contain_pointers (oo) || to_mark_class_object)
19363 dprintf(3,( "Marking through %Ix", (size_t)oo));
19364 size_t s = size (oo);
19366 #ifdef COLLECTIBLE_CLASS
19367 if (to_mark_class_object)
19369 uint8_t* class_obj = get_class_object (oo);
19370 mark_object (class_obj THREAD_NUMBER_ARG);
19372 #endif //COLLECTIBLE_CLASS
19374 if (contain_pointers (oo))
19376 go_through_object_nostart (method_table(oo), oo, s, po,
19378 mark_object (o THREAD_NUMBER_ARG);
19384 size_t gc_heap::get_total_heap_size()
19386 size_t total_heap_size = 0;
19388 #ifdef MULTIPLE_HEAPS
19391 for (hn = 0; hn < gc_heap::n_heaps; hn++)
19393 gc_heap* hp2 = gc_heap::g_heaps [hn];
19394 total_heap_size += hp2->generation_size (max_generation + 1) + hp2->generation_sizes (hp2->generation_of (max_generation));
19397 total_heap_size = generation_size (max_generation + 1) + generation_sizes (generation_of (max_generation));
19398 #endif //MULTIPLE_HEAPS
19400 return total_heap_size;
19403 size_t gc_heap::get_total_fragmentation()
19405 size_t total_fragmentation = 0;
19407 #ifdef MULTIPLE_HEAPS
19408 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
19410 gc_heap* hp = gc_heap::g_heaps[hn];
19411 #else //MULTIPLE_HEAPS
19413 gc_heap* hp = pGenGCHeap;
19414 #endif //MULTIPLE_HEAPS
19415 for (int i = 0; i <= (max_generation + 1); i++)
19417 generation* gen = hp->generation_of (i);
19418 total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen));
19422 return total_fragmentation;
19425 size_t gc_heap::get_total_gen_fragmentation (int gen_number)
19427 size_t total_fragmentation = 0;
19429 #ifdef MULTIPLE_HEAPS
19430 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
19432 gc_heap* hp = gc_heap::g_heaps[hn];
19433 #else //MULTIPLE_HEAPS
19435 gc_heap* hp = pGenGCHeap;
19436 #endif //MULTIPLE_HEAPS
19437 generation* gen = hp->generation_of (gen_number);
19438 total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen));
19441 return total_fragmentation;
19444 size_t gc_heap::get_total_gen_estimated_reclaim (int gen_number)
19446 size_t total_estimated_reclaim = 0;
19448 #ifdef MULTIPLE_HEAPS
19449 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
19451 gc_heap* hp = gc_heap::g_heaps[hn];
19452 #else //MULTIPLE_HEAPS
19454 gc_heap* hp = pGenGCHeap;
19455 #endif //MULTIPLE_HEAPS
19456 total_estimated_reclaim += hp->estimated_reclaim (gen_number);
19459 return total_estimated_reclaim;
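// ---------------------------------------------------------------------------
// Note on the idiom used by the get_total_* functions above (a sketch with a
// hypothetical per_heap_value accessor, not real GC code): under
// MULTIPLE_HEAPS the loop header binds hp to each heap in g_heaps in turn;
// otherwise the #else arm binds hp to the single heap and the "loop" body
// runs exactly once. The single '{' is shared between the two preprocessor
// arms, and the matching '}' closes either the loop body or the one-shot
// block:
//
//     size_t total = 0;
//     #ifdef MULTIPLE_HEAPS
//     for (int hn = 0; hn < gc_heap::n_heaps; hn++)
//     {
//         gc_heap* hp = gc_heap::g_heaps[hn];
//     #else //MULTIPLE_HEAPS
//     {
//         gc_heap* hp = pGenGCHeap;
//     #endif //MULTIPLE_HEAPS
//         total += hp->per_heap_value();
//     }
// ---------------------------------------------------------------------------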
19462 size_t gc_heap::committed_size()
19464 generation* gen = generation_of (max_generation);
19465 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
19466 size_t total_committed = 0;
19470 total_committed += heap_segment_committed (seg) - (uint8_t*)seg;
19472 seg = heap_segment_next (seg);
19475 if (gen != large_object_generation)
19477 gen = generation_of (max_generation + 1);
19478 seg = generation_start_segment (gen);
19485 return total_committed;
19488 size_t gc_heap::get_total_committed_size()
19490 size_t total_committed = 0;
19492 #ifdef MULTIPLE_HEAPS
19495 for (hn = 0; hn < gc_heap::n_heaps; hn++)
19497 gc_heap* hp = gc_heap::g_heaps [hn];
19498 total_committed += hp->committed_size();
19501 total_committed = committed_size();
19502 #endif //MULTIPLE_HEAPS
19504 return total_committed;
19507 size_t gc_heap::committed_size (bool loh_p, size_t* allocated)
19509 int gen_number = (loh_p ? (max_generation + 1) : max_generation);
19510 generation* gen = generation_of (gen_number);
19511 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
19512 size_t total_committed = 0;
19513 size_t total_allocated = 0;
19517 total_committed += heap_segment_committed (seg) - (uint8_t*)seg;
19518 total_allocated += heap_segment_allocated (seg) - (uint8_t*)seg;
19519 seg = heap_segment_next (seg);
19522 *allocated = total_allocated;
19523 return total_committed;
19526 void gc_heap::get_memory_info (uint32_t* memory_load,
19527 uint64_t* available_physical,
19528 uint64_t* available_page_file)
19530 GCToOSInterface::GetMemoryStatus(memory_load, available_physical, available_page_file);
19533 void fire_mark_event (int heap_num, int root_type, size_t bytes_marked)
19535 dprintf (DT_LOG_0, ("-----------[%d]mark %d: %Id", heap_num, root_type, bytes_marked));
19536 FIRE_EVENT(GCMarkWithType, heap_num, root_type, bytes_marked);
19539 //returns TRUE if an overflow happened.
19540 BOOL gc_heap::process_mark_overflow(int condemned_gen_number)
19542 size_t last_promoted_bytes = promoted_bytes (heap_number);
19543 BOOL overflow_p = FALSE;
19545 if ((max_overflow_address != 0) ||
19546 (min_overflow_address != MAX_PTR))
19549 // Try to grow the array.
19551 max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length);
19553 if ((new_size * sizeof(mark)) > 100*1024)
19555 size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);
19557 new_size = min(new_max_size, new_size);
19560 if ((mark_stack_array_length < new_size) &&
19561 ((new_size - mark_stack_array_length) > (mark_stack_array_length / 2)))
19563 mark* tmp = new (nothrow) mark [new_size];
19566 delete mark_stack_array;
19567 mark_stack_array = tmp;
19568 mark_stack_array_length = new_size;
19572 uint8_t* min_add = min_overflow_address;
19573 uint8_t* max_add = max_overflow_address;
19574 max_overflow_address = 0;
19575 min_overflow_address = MAX_PTR;
19576 process_mark_overflow_internal (condemned_gen_number, min_add, max_add);
19580 size_t current_promoted_bytes = promoted_bytes (heap_number);
19582 if (current_promoted_bytes != last_promoted_bytes)
19583 fire_mark_event (heap_number, ETW::GC_ROOT_OVERFLOW, (current_promoted_bytes - last_promoted_bytes));
19587 void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
19588 uint8_t* min_add, uint8_t* max_add)
19590 #ifdef MULTIPLE_HEAPS
19591 int thread = heap_number;
19592 #endif //MULTIPLE_HEAPS
19593 BOOL full_p = (condemned_gen_number == max_generation);
19595 dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
19596 #ifdef MULTIPLE_HEAPS
19597 for (int hi = 0; hi < n_heaps; hi++)
19599 gc_heap* hp = g_heaps [(heap_number + hi) % n_heaps];
19605 #endif //MULTIPLE_HEAPS
19606 BOOL small_object_segments = TRUE;
19607 int align_const = get_alignment_constant (small_object_segments);
19608 generation* gen = hp->generation_of (condemned_gen_number);
19609 heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
19611 PREFIX_ASSUME(seg != NULL);
19612 uint8_t* o = max (heap_segment_mem (seg), min_add);
19615 uint8_t* end = heap_segment_allocated (seg);
19617 while ((o < end) && (o <= max_add))
19619 assert ((min_add <= o) && (max_add >= o));
19620 dprintf (3, ("considering %Ix", (size_t)o));
19623 mark_through_object (o, TRUE THREAD_NUMBER_ARG);
19626 o = o + Align (size (o), align_const);
19629 if (( seg = heap_segment_next_in_range (seg)) == 0)
19631 if (small_object_segments && full_p)
19633 small_object_segments = FALSE;
19634 align_const = get_alignment_constant (small_object_segments);
19635 seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));
19637 PREFIX_ASSUME(seg != NULL);
19639 o = max (heap_segment_mem (seg), min_add);
19649 o = max (heap_segment_mem (seg), min_add);
19656 // Scanning dependent handles for promotion needs special handling. Because the primary holds a strong
19657 // reference to the secondary (when the primary itself is reachable) and this can cause a cascading series of
19658 // promotions (the secondary of one handle is, or promotes, the primary of another) we might need to perform the
19659 // promotion scan multiple times.
19660 // This helper encapsulates the logic to complete all dependent handle promotions when running a server GC. It
19661 // also has the effect of processing any mark stack overflow.
19663 #ifdef MULTIPLE_HEAPS
19664 // When multiple heaps are enabled we must utilize a more complex algorithm in order to keep all the GC
19665 // worker threads synchronized. The algorithms are sufficiently divergent that we have different
19666 // implementations based on whether MULTIPLE_HEAPS is defined or not.
19668 // Define some static variables used for synchronization in the method below. These should really be defined
19669 // locally but MSVC complains when the VOLATILE macro is expanded into an instantiation of the Volatile class.
19671 // A note about the synchronization used within this method. Communication between the worker threads is
19672 // achieved via two shared booleans (defined below). These both act as latches that are transitioned only from
19673 // false -> true by unsynchronized code. They are only read or reset to false by a single thread under the
19674 // protection of a join.
19675 static VOLATILE(BOOL) s_fUnpromotedHandles = FALSE;
19676 static VOLATILE(BOOL) s_fUnscannedPromotions = FALSE;
19677 static VOLATILE(BOOL) s_fScanRequired;
19678 void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
19680 // Whenever we call this method there may have been preceding object promotions. So set
19681 // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
19682 // based on how the scanning proceeded).
19683 s_fUnscannedPromotions = TRUE;
19685 // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
19686 // the state of this thread's portion of the dependent handle table. That's because promotions on other
19687 // threads could cause handle promotions to become necessary here. Even if there are definitely no more
19688 // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
19689 // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
19690 // as all the others or they'll get out of step).
19693 // The various worker threads are all currently racing in this code. We need to work out if at least
19694 // one of them thinks it has work to do this cycle. Each thread needs to rescan its portion of the
19695 // dependent handle table when both of the following conditions apply:
19696 // 1) At least one (arbitrary) object might have been promoted since the last scan (because if this
19697 // object happens to correspond to a primary in one of our handles we might potentially have to
19698 // promote the associated secondary).
19699 // 2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
19701 // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
19702 // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
19703 // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
19704 // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
19705 // follows below. Note that we can't read this outside of the join since on any iteration apart from
19706 // the first threads will be racing between reading this value and completing their previous
19707 // iteration's table scan.
19709 // The second condition is tracked by the dependent handle code itself on a per worker thread basis
19710 // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
19711 // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
19712 // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
19713 // we're safely joined.
19714 if (GCScan::GcDhUnpromotedHandlesExist(sc))
19715 s_fUnpromotedHandles = TRUE;
19717 // Synchronize all the threads so we can read our state variables safely. The shared variable
19718 // s_fScanRequired, indicating whether we should scan the tables or terminate the loop, will be set by
19719 // a single thread inside the join.
19720 gc_t_join.join(this, gc_join_scan_dependent_handles);
19721 if (gc_t_join.joined())
19723 // We're synchronized so it's safe to read our shared state variables. We update another shared
19724 // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
19725 // the loop. We scan if there has been at least one object promotion since last time and at least
19726 // one thread has a dependent handle table with a potential handle promotion possible.
19727 s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
19729 // Reset our shared state variables (ready to be set again on this scan or with a good initial
19730 // value for the next call if we're terminating the loop).
19731 s_fUnscannedPromotions = FALSE;
19732 s_fUnpromotedHandles = FALSE;
19734 if (!s_fScanRequired)
19736 // We're terminating the loop. Perform any last operations that require single threaded access.
19737 if (!initial_scan_p)
19739 // On the second invocation we reconcile all mark overflow ranges across the heaps. This can help
19740 // load balance if some of the heaps have an abnormally large workload.
19741 uint8_t* all_heaps_max = 0;
19742 uint8_t* all_heaps_min = MAX_PTR;
19744 for (i = 0; i < n_heaps; i++)
19746 if (all_heaps_max < g_heaps[i]->max_overflow_address)
19747 all_heaps_max = g_heaps[i]->max_overflow_address;
19748 if (all_heaps_min > g_heaps[i]->min_overflow_address)
19749 all_heaps_min = g_heaps[i]->min_overflow_address;
19751 for (i = 0; i < n_heaps; i++)
19753 g_heaps[i]->max_overflow_address = all_heaps_max;
19754 g_heaps[i]->min_overflow_address = all_heaps_min;
19759 // Restart all the workers.
19760 dprintf(3, ("Starting all gc thread mark stack overflow processing"));
19761 gc_t_join.restart();
19764 // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
19765 // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
19766 // global flag indicating that at least one object promotion may have occurred (the usual comment
19767 // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
19768 // exit the method since we unconditionally set this variable on method entry anyway).
19769 if (process_mark_overflow(condemned_gen_number))
19770 s_fUnscannedPromotions = TRUE;
19772 // If we decided that no scan was required we can terminate the loop now.
19773 if (!s_fScanRequired)
19776 // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
19777 // processed before we start scanning dependent handle tables (if overflows remain while we scan we
19778 // could miss noting the promotion of some primary objects).
19779 gc_t_join.join(this, gc_join_rescan_dependent_handles);
19780 if (gc_t_join.joined())
19782 // Restart all the workers.
19783 dprintf(3, ("Starting all gc thread for dependent handle promotion"));
19784 gc_t_join.restart();
19787 // If the portion of the dependent handle table managed by this worker has handles that could still be
19788 // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
19789 // could require a rescan of handles on this or other workers.
19790 if (GCScan::GcDhUnpromotedHandlesExist(sc))
19791 if (GCScan::GcDhReScan(sc))
19792 s_fUnscannedPromotions = TRUE;
19795 #else //MULTIPLE_HEAPS
19796 // Non-multiple heap version of scan_dependent_handles: much simpler without the need to keep multiple worker
19797 // threads synchronized.
19798 void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
19800 UNREFERENCED_PARAMETER(initial_scan_p);
19802 // Whenever we call this method there may have been preceding object promotions. So set
19803 // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
19804 // based on how the scanning proceeded).
19805 bool fUnscannedPromotions = true;
19807 // Loop until there are either no more dependent handles that can have their secondary promoted or we've
19808 // managed to perform a scan without promoting anything new.
19809 while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
19811 // On each iteration of the loop start with the assumption that no further objects have been promoted.
19812 fUnscannedPromotions = false;
19814 // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
19815 // being visible. If there was an overflow (process_mark_overflow returned true) then additional
19816 // objects now appear to be promoted and we should set the flag.
19817 if (process_mark_overflow(condemned_gen_number))
19818 fUnscannedPromotions = true;
19820 // Perform the scan and set the flag if any promotions resulted.
19821 if (GCScan::GcDhReScan(sc))
19822 fUnscannedPromotions = true;
19825 // Process any mark stack overflow that may have resulted from scanning handles (or if we didn't need to
19826 // scan any handles at all this is the processing of overflows that may have occurred prior to this method
19827 // invocation).
19828 process_mark_overflow(condemned_gen_number);
19830 #endif //MULTIPLE_HEAPS
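// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, self-contained): why dependent handle
// scanning must iterate to a fixed point. A handle's secondary is promoted
// once its primary is live, and that secondary may itself be the primary of
// another handle, so a single pass is not enough in general. Modeling handles
// as (primary, secondary) index pairs over a reachability bitmap:
struct example_dependent_handle
{
    int primary;
    int secondary;
};

static void example_scan_to_fixed_point (bool* reachable,
                                         const example_dependent_handle* handles,
                                         int handle_count)
{
    bool promoted_something = true;
    while (promoted_something)
    {
        promoted_something = false;
        for (int i = 0; i < handle_count; i++)
        {
            // promote the secondary when the primary is live; noting the
            // promotion forces another pass, just like fUnscannedPromotions
            // in the loop above
            if (reachable[handles[i].primary] && !reachable[handles[i].secondary])
            {
                reachable[handles[i].secondary] = true;
                promoted_something = true;
            }
        }
    }
}
// ---------------------------------------------------------------------------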
19832 void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
19834 assert (settings.concurrent == FALSE);
19837 sc.thread_number = heap_number;
19838 sc.promotion = TRUE;
19839 sc.concurrent = FALSE;
19841 dprintf(2,("---- Mark Phase condemning %d ----", condemned_gen_number));
19842 BOOL full_p = (condemned_gen_number == max_generation);
19847 start = GetCycleCount32();
19850 int gen_to_init = condemned_gen_number;
19851 if (condemned_gen_number == max_generation)
19853 gen_to_init = max_generation + 1;
19855 for (int gen_idx = 0; gen_idx <= gen_to_init; gen_idx++)
19857 dynamic_data* dd = dynamic_data_of (gen_idx);
19858 dd_begin_data_size (dd) = generation_size (gen_idx) -
19859 dd_fragmentation (dd) -
19860 Align (size (generation_allocation_start (generation_of (gen_idx))));
19861 dprintf (2, ("begin data size for gen%d is %Id", gen_idx, dd_begin_data_size (dd)));
19862 dd_survived_size (dd) = 0;
19863 dd_pinned_survived_size (dd) = 0;
19864 dd_artificial_pinned_survived_size (dd) = 0;
19865 dd_added_pinned_size (dd) = 0;
19867 dd_padding_size (dd) = 0;
19868 #endif //SHORT_PLUGS
19869 #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
19870 dd_num_npinned_plugs (dd) = 0;
19871 #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
19874 #ifdef FFIND_OBJECT
19875 if (gen0_must_clear_bricks > 0)
19876 gen0_must_clear_bricks--;
19877 #endif //FFIND_OBJECT
19879 size_t last_promoted_bytes = 0;
19881 promoted_bytes (heap_number) = 0;
19882 reset_mark_stack();
19885 memset (&snoop_stat, 0, sizeof(snoop_stat));
19886 snoop_stat.heap_index = heap_number;
19887 #endif //SNOOP_STATS
19892 //initialize the mark stack
19893 for (int i = 0; i < max_snoop_level; i++)
19895 ((uint8_t**)(mark_stack_array))[i] = 0;
19898 mark_stack_busy() = 1;
19900 #endif //MH_SC_MARK
19902 static uint32_t num_sizedrefs = 0;
19905 static BOOL do_mark_steal_p = FALSE;
19906 #endif //MH_SC_MARK
19908 #ifdef MULTIPLE_HEAPS
19909 gc_t_join.join(this, gc_join_begin_mark_phase);
19910 if (gc_t_join.joined())
19912 #endif //MULTIPLE_HEAPS
19914 maxgen_size_inc_p = false;
19916 num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
19918 #ifdef MULTIPLE_HEAPS
19923 size_t total_heap_size = get_total_heap_size();
19925 if (total_heap_size > (100 * 1024 * 1024))
19927 do_mark_steal_p = TRUE;
19931 do_mark_steal_p = FALSE;
19936 do_mark_steal_p = FALSE;
19938 #endif //MH_SC_MARK
19940 gc_t_join.restart();
19942 #endif //MULTIPLE_HEAPS
19947 //set up the mark lists from g_mark_list
19948 assert (g_mark_list);
19949 #ifdef MULTIPLE_HEAPS
19950 mark_list = &g_mark_list [heap_number*mark_list_size];
19952 mark_list = g_mark_list;
19953 #endif //MULTIPLE_HEAPS
19954 //don't use the mark list for full gc
19955 //because multiple segments are more complex to handle and the list
19956 //is likely to overflow
19957 if (condemned_gen_number != max_generation)
19958 mark_list_end = &mark_list [mark_list_size-1];
19960 mark_list_end = &mark_list [0];
19961 mark_list_index = &mark_list [0];
19964 #ifndef MULTIPLE_HEAPS
19965 shigh = (uint8_t*) 0;
19967 #endif //MULTIPLE_HEAPS
19969 //%type% category = quote (mark);
19971 if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
19973 GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
19974 fire_mark_event (heap_number, ETW::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
19975 last_promoted_bytes = promoted_bytes (heap_number);
19977 #ifdef MULTIPLE_HEAPS
19978 gc_t_join.join(this, gc_join_scan_sizedref_done);
19979 if (gc_t_join.joined())
19981 dprintf(3, ("Done with marking all sized refs. Starting all gc thread for marking other strong roots"));
19982 gc_t_join.restart();
19984 #endif //MULTIPLE_HEAPS
19987 dprintf(3,("Marking Roots"));
19989 GCScan::GcScanRoots(GCHeap::Promote,
19990 condemned_gen_number, max_generation,
19993 fire_mark_event (heap_number, ETW::GC_ROOT_STACK, (promoted_bytes (heap_number) - last_promoted_bytes));
19994 last_promoted_bytes = promoted_bytes (heap_number);
19996 #ifdef BACKGROUND_GC
19997 if (recursive_gc_sync::background_running_p())
19999 scan_background_roots (GCHeap::Promote, heap_number, &sc);
20001 #endif //BACKGROUND_GC
20003 #ifdef FEATURE_PREMORTEM_FINALIZATION
20004 dprintf(3, ("Marking finalization data"));
20005 finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0);
20006 #endif // FEATURE_PREMORTEM_FINALIZATION
20008 fire_mark_event (heap_number, ETW::GC_ROOT_FQ, (promoted_bytes (heap_number) - last_promoted_bytes));
20009 last_promoted_bytes = promoted_bytes (heap_number);
20014 dprintf(3,("Marking handle table"));
20015 GCScan::GcScanHandles(GCHeap::Promote,
20016 condemned_gen_number, max_generation,
20018 fire_mark_event (heap_number, ETW::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
20019 last_promoted_bytes = promoted_bytes (heap_number);
20023 size_t promoted_before_cards = promoted_bytes (heap_number);
20026 dprintf (3, ("before cards: %Id", promoted_before_cards));
20030 #ifdef MULTIPLE_HEAPS
20031 if (gc_t_join.r_join(this, gc_r_join_update_card_bundle))
20033 #endif //MULTIPLE_HEAPS
20035 #ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
20036 // If we are manually managing card bundles, every write to the card table should already be
20037 // accounted for in the card bundle table so there's nothing to update here.
20038 update_card_table_bundle();
20040 if (card_bundles_enabled())
20042 verify_card_bundles();
20045 #ifdef MULTIPLE_HEAPS
20046 gc_t_join.r_restart();
20048 #endif //MULTIPLE_HEAPS
20049 #endif //CARD_BUNDLE
20051 card_fn mark_object_fn = &gc_heap::mark_object_simple;
20052 #ifdef HEAP_ANALYZE
20053 heap_analyze_success = TRUE;
20054 if (heap_analyze_enabled)
20056 internal_root_array_index = 0;
20058 current_obj_size = 0;
20059 mark_object_fn = &gc_heap::ha_mark_object_simple;
20061 #endif //HEAP_ANALYZE
20063 dprintf(3,("Marking cross generation pointers"));
20064 mark_through_cards_for_segments (mark_object_fn, FALSE);
20066 dprintf(3,("Marking cross generation pointers for large objects"));
20067 mark_through_cards_for_large_objects (mark_object_fn, FALSE);
20069 dprintf (3, ("marked by cards: %Id",
20070 (promoted_bytes (heap_number) - promoted_before_cards)));
20071 fire_mark_event (heap_number, ETW::GC_ROOT_OLDER, (promoted_bytes (heap_number) - last_promoted_bytes));
20072 last_promoted_bytes = promoted_bytes (heap_number);
20077 if (do_mark_steal_p)
20081 #endif //MH_SC_MARK
20083 // Dependent handles need to be scanned with a special algorithm (see the header comment on
20084 // scan_dependent_handles for more detail). We perform an initial scan without synchronizing with other
20085 // worker threads or processing any mark stack overflow. This is not guaranteed to complete the operation
20086 // but in a common case (where there are no dependent handles that are due to be collected) it allows us
20087 // to optimize away further scans. The call to scan_dependent_handles is what will cycle through more
20088 // iterations if required and will also perform processing of any mark stack overflow once the dependent
20089 // handle table has been fully promoted.
20090 GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
20091 scan_dependent_handles(condemned_gen_number, &sc, true);
20093 #ifdef MULTIPLE_HEAPS
20094 dprintf(3, ("Joining for short weak handle scan"));
20095 gc_t_join.join(this, gc_join_null_dead_short_weak);
20096 if (gc_t_join.joined())
20097 #endif //MULTIPLE_HEAPS
20099 #ifdef HEAP_ANALYZE
20100 heap_analyze_enabled = FALSE;
20101 GCToEEInterface::AnalyzeSurvivorsFinished(condemned_gen_number);
20102 #endif // HEAP_ANALYZE
20103 GCToEEInterface::AfterGcScanRoots (condemned_gen_number, max_generation, &sc);
20105 #ifdef MULTIPLE_HEAPS
20108 // we used r_join and need to reinitialize states for it here.
20109 gc_t_join.r_init();
20112 //start all threads on the roots.
20113 dprintf(3, ("Starting all gc thread for short weak handle scan"));
20114 gc_t_join.restart();
20115 #endif //MULTIPLE_HEAPS
20120 // null out the targets of short weakrefs that were not promoted.
20120 GCScan::GcShortWeakPtrScan(GCHeap::Promote, condemned_gen_number, max_generation,&sc);
20122 // MTHTS: keep by single thread
20123 #ifdef MULTIPLE_HEAPS
20124 dprintf(3, ("Joining for finalization"));
20125 gc_t_join.join(this, gc_join_scan_finalization);
20126 if (gc_t_join.joined())
20127 #endif //MULTIPLE_HEAPS
20130 #ifdef MULTIPLE_HEAPS
20131 //start all threads on the roots.
20132 dprintf(3, ("Starting all gc thread for Finalization"));
20133 gc_t_join.restart();
20134 #endif //MULTIPLE_HEAPS
20137 //Handle finalization.
20138 size_t promoted_bytes_live = promoted_bytes (heap_number);
20140 #ifdef FEATURE_PREMORTEM_FINALIZATION
20141 dprintf (3, ("Finalize marking"));
20142 finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);
20144 GCToEEInterface::DiagWalkFReachableObjects(__this);
20145 #endif // FEATURE_PREMORTEM_FINALIZATION
20147 // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
20148 // for finalization. As before scan_dependent_handles will also process any mark stack overflow.
20149 scan_dependent_handles(condemned_gen_number, &sc, false);
20151 #ifdef MULTIPLE_HEAPS
20152 dprintf(3, ("Joining for weak pointer deletion"));
20153 gc_t_join.join(this, gc_join_null_dead_long_weak);
20154 if (gc_t_join.joined())
20156 //start all threads on the roots.
20157 dprintf(3, ("Starting all gc thread for weak pointer deletion"));
20158 gc_t_join.restart();
20160 #endif //MULTIPLE_HEAPS
20162 // null out the targets of long weakrefs that were not promoted.
20163 GCScan::GcWeakPtrScan (GCHeap::Promote, condemned_gen_number, max_generation, &sc);
20165 // MTHTS: keep by single thread
20166 #ifdef MULTIPLE_HEAPS
20168 #ifdef PARALLEL_MARK_LIST_SORT
20169 // unsigned long start = GetCycleCount32();
20171 // printf("sort_mark_list took %u cycles\n", GetCycleCount32() - start);
20172 #endif //PARALLEL_MARK_LIST_SORT
20175 dprintf (3, ("Joining for sync block cache entry scanning"));
20176 gc_t_join.join(this, gc_join_null_dead_syncblk);
20177 if (gc_t_join.joined())
20178 #endif //MULTIPLE_HEAPS
20180 // scan for deleted entries in the syncblk cache
20181 GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
20183 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
20184 if (g_fEnableAppDomainMonitoring)
20186 size_t promoted_all_heaps = 0;
20187 #ifdef MULTIPLE_HEAPS
20188 for (int i = 0; i < n_heaps; i++)
20190 promoted_all_heaps += promoted_bytes (i);
20193 promoted_all_heaps = promoted_bytes (heap_number);
20194 #endif //MULTIPLE_HEAPS
20195 GCToEEInterface::RecordTotalSurvivedBytes(promoted_all_heaps);
20197 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
20199 #ifdef MULTIPLE_HEAPS
20202 #ifndef PARALLEL_MARK_LIST_SORT
20203 //compact g_mark_list and sort it.
20204 combine_mark_lists();
20205 #endif //PARALLEL_MARK_LIST_SORT
20208 //decide on promotion
20209 if (!settings.promotion)
20212 for (int n = 0; n <= condemned_gen_number;n++)
20214 m += (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.1);
20217 for (int i = 0; i < n_heaps; i++)
20219 dynamic_data* dd = g_heaps[i]->dynamic_data_of (min (condemned_gen_number +1,
20221 size_t older_gen_size = (dd_current_size (dd) +
20222 (dd_desired_allocation (dd) -
20223 dd_new_allocation (dd)));
20225 if ((m > (older_gen_size)) ||
20226 (promoted_bytes (i) > m))
20228 settings.promotion = TRUE;
20234 if (do_mark_steal_p)
20236 size_t objects_checked_count = 0;
20237 size_t zero_ref_count = 0;
20238 size_t objects_marked_count = 0;
20239 size_t check_level_count = 0;
20240 size_t busy_count = 0;
20241 size_t interlocked_count = 0;
20242 size_t partial_mark_parent_count = 0;
20243 size_t stolen_or_pm_count = 0;
20244 size_t stolen_entry_count = 0;
20245 size_t pm_not_ready_count = 0;
20246 size_t normal_count = 0;
20247 size_t stack_bottom_clear_count = 0;
20249 for (int i = 0; i < n_heaps; i++)
20251 gc_heap* hp = g_heaps[i];
20252 hp->print_snoop_stat();
20253 objects_checked_count += hp->snoop_stat.objects_checked_count;
20254 zero_ref_count += hp->snoop_stat.zero_ref_count;
20255 objects_marked_count += hp->snoop_stat.objects_marked_count;
20256 check_level_count += hp->snoop_stat.check_level_count;
20257 busy_count += hp->snoop_stat.busy_count;
20258 interlocked_count += hp->snoop_stat.interlocked_count;
20259 partial_mark_parent_count += hp->snoop_stat.partial_mark_parent_count;
20260 stolen_or_pm_count += hp->snoop_stat.stolen_or_pm_count;
20261 stolen_entry_count += hp->snoop_stat.stolen_entry_count;
20262 pm_not_ready_count += hp->snoop_stat.pm_not_ready_count;
20263 normal_count += hp->snoop_stat.normal_count;
20264 stack_bottom_clear_count += hp->snoop_stat.stack_bottom_clear_count;
20269 printf ("-------total stats-------\n");
20270 printf ("%8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n",
20271 "checked", "zero", "marked", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
20272 printf ("%8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
20273 objects_checked_count,
20275 objects_marked_count,
20279 partial_mark_parent_count,
20280 stolen_or_pm_count,
20281 stolen_entry_count,
20282 pm_not_ready_count,
20284 stack_bottom_clear_count);
20286 #endif //SNOOP_STATS
20288 //start all threads.
20289 dprintf(3, ("Starting all threads for end of mark phase"));
20290 gc_t_join.restart();
20291 #else //MULTIPLE_HEAPS
20293 //decide on promotion
20294 if (!settings.promotion)
20297 for (int n = 0; n <= condemned_gen_number;n++)
20299 m += (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.06);
20301 dynamic_data* dd = dynamic_data_of (min (condemned_gen_number +1,
20303 size_t older_gen_size = (dd_current_size (dd) +
20304 (dd_desired_allocation (dd) -
20305 dd_new_allocation (dd)));
20307 dprintf (2, ("promotion threshold: %Id, promoted bytes: %Id size n+1: %Id",
20308 m, promoted_bytes (heap_number), older_gen_size));
20310 if ((m > older_gen_size) ||
20311 (promoted_bytes (heap_number) > m))
20313 settings.promotion = TRUE;
20317 #endif //MULTIPLE_HEAPS
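// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper): the promotion decision made in
// both arms above. The threshold m is a weighted sum of the condemned
// generations' minimum sizes (the per-generation weight is 0.1 per heap on
// server GC, 0.06 on workstation), and promotion is enabled when the next
// older generation is smaller than m or more than m bytes were promoted:
static bool example_decide_promotion (const size_t* gen_min_sizes,
                                      int condemned_gen_number,
                                      double factor, // 0.1 or 0.06
                                      size_t older_gen_size,
                                      size_t promoted)
{
    size_t m = 0;
    for (int n = 0; n <= condemned_gen_number; n++)
    {
        m += (size_t)(gen_min_sizes[n] * (n + 1) * factor);
    }

    return ((m > older_gen_size) || (promoted > m));
}
// ---------------------------------------------------------------------------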
20320 #ifdef MULTIPLE_HEAPS
20322 #ifdef PARALLEL_MARK_LIST_SORT
20323 // start = GetCycleCount32();
20324 merge_mark_lists();
20325 // printf("merge_mark_lists took %u cycles\n", GetCycleCount32() - start);
20326 #endif //PARALLEL_MARK_LIST_SORT
20328 #endif //MULTIPLE_HEAPS
20330 #ifdef BACKGROUND_GC
20331 total_promoted_bytes = promoted_bytes (heap_number);
20332 #endif //BACKGROUND_GC
20334 promoted_bytes (heap_number) -= promoted_bytes_live;
20337 finish = GetCycleCount32();
20338 mark_time = finish - start;
20341 dprintf(2,("---- End of mark phase ----"));
20345 void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high)
20347 dprintf (3, ("Pinning %Ix", (size_t)o));
20348 if ((o >= low) && (o < high))
20350 dprintf(3,("^%Ix^", (size_t)o));
20353 #ifdef FEATURE_EVENT_TRACE
20354 if(EVENT_ENABLED(PinObjectAtGCTime))
20356 fire_etw_pin_object_event(o, ppObject);
20358 #endif // FEATURE_EVENT_TRACE
20360 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
20361 num_pinned_objects++;
20362 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
20366 #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
20367 size_t gc_heap::get_total_pinned_objects()
20369 #ifdef MULTIPLE_HEAPS
20370 size_t total_num_pinned_objects = 0;
20371 for (int i = 0; i < gc_heap::n_heaps; i++)
20373 gc_heap* hp = gc_heap::g_heaps[i];
20374 total_num_pinned_objects += hp->num_pinned_objects;
20376 return total_num_pinned_objects;
20377 #else //MULTIPLE_HEAPS
20378 return num_pinned_objects;
20379 #endif //MULTIPLE_HEAPS
20381 #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
20383 void gc_heap::reset_mark_stack ()
20385 reset_pinned_queue();
20386 max_overflow_address = 0;
20387 min_overflow_address = MAX_PTR;
20390 #ifdef FEATURE_STRUCTALIGN
20392 // The word with left child, right child, and align info is laid out as follows:
19394 //      |   upper short word   |   lower short word   |
19395 //      |<------------> <----->|<------------> <----->|
19396 //      |  left child   info hi| right child   info lo|
19397 // x86: |    10 bits    6 bits |   10 bits    6 bits  |
20399 // where left/right child are signed values and concat(info hi, info lo) is unsigned.
20401 // The "align info" encodes two numbers: the required alignment (a power of two)
20402 // and the misalignment (the number of machine words the destination address needs
20403 // to be adjusted by to provide alignment - so this number is always smaller than
20404 // the required alignment). Thus, the two can be represented as the "logical or"
20405 // of the two numbers. Note that the actual pad is computed from the misalignment
20406 // by adding the alignment iff the misalignment is non-zero and less than min_obj_size.
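// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helpers): the "logical or" encoding the
// comment above describes. Since the misalignment is always smaller than the
// power-of-two alignment, or'ing them together loses nothing - the highest
// set bit of the combined value is the alignment and the remainder is the
// misalignment. Decoding isolates the top bit the same way node_aligninfo
// does below: smear it into every lower position, then xor away everything
// but the highest bit.
static ptrdiff_t example_encode_aligninfo (ptrdiff_t alignment, ptrdiff_t misalignment)
{
    // assumes alignment is a power of two and misalignment < alignment
    return (alignment | misalignment);
}

static void example_decode_aligninfo (ptrdiff_t aligninfo,
                                      ptrdiff_t& alignment,
                                      ptrdiff_t& misalignment)
{
    ptrdiff_t x = aligninfo;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16; // enough for 32-bit values; 64-bit also needs x |= x >> 32

    alignment = x ^ (x >> 1); // keep only the highest set bit
    misalignment = aligninfo - alignment;
}
// ---------------------------------------------------------------------------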
20409 // The number of bits in a brick.
20410 #if defined (_TARGET_AMD64_)
20411 #define brick_bits (12)
20413 #define brick_bits (11)
20414 #endif //_TARGET_AMD64_
20415 C_ASSERT(brick_size == (1 << brick_bits));
20417 // The number of bits needed to represent the offset to a child node.
20418 // "brick_bits + 1" allows us to represent a signed offset within a brick.
20419 #define child_bits (brick_bits + 1 - LOG2_PTRSIZE)
20421 // The number of bits in each of the pad hi, pad lo fields.
20422 #define pad_bits (sizeof(short) * 8 - child_bits)
20424 #define child_from_short(w) (((signed short)(w) / (1 << (pad_bits - LOG2_PTRSIZE))) & ~((1 << LOG2_PTRSIZE) - 1))
20425 #define pad_mask ((1 << pad_bits) - 1)
20426 #define pad_from_short(w) ((size_t)(w) & pad_mask)
20427 #else // FEATURE_STRUCTALIGN
20428 #define child_from_short(w) (w)
20429 #endif // FEATURE_STRUCTALIGN
20432 short node_left_child(uint8_t* node)
20434 return child_from_short(((plug_and_pair*)node)[-1].m_pair.left);
20438 void set_node_left_child(uint8_t* node, ptrdiff_t val)
20440 assert (val > -(ptrdiff_t)brick_size);
20441 assert (val < (ptrdiff_t)brick_size);
20442 assert (Aligned (val));
20443 #ifdef FEATURE_STRUCTALIGN
20444 size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.left);
20445 ((plug_and_pair*)node)[-1].m_pair.left = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
20446 #else // FEATURE_STRUCTALIGN
20447 ((plug_and_pair*)node)[-1].m_pair.left = (short)val;
20448 #endif // FEATURE_STRUCTALIGN
20449 assert (node_left_child (node) == val);
20453 short node_right_child(uint8_t* node)
20455 return child_from_short(((plug_and_pair*)node)[-1].m_pair.right);
20459 void set_node_right_child(uint8_t* node, ptrdiff_t val)
20461 assert (val > -(ptrdiff_t)brick_size);
20462 assert (val < (ptrdiff_t)brick_size);
20463 assert (Aligned (val));
20464 #ifdef FEATURE_STRUCTALIGN
20465 size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.right);
20466 ((plug_and_pair*)node)[-1].m_pair.right = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
20467 #else // FEATURE_STRUCTALIGN
20468 ((plug_and_pair*)node)[-1].m_pair.right = (short)val;
20469 #endif // FEATURE_STRUCTALIGN
20470 assert (node_right_child (node) == val);
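// ---------------------------------------------------------------------------
// Illustrative sketch: the child links above are signed byte offsets within a
// brick, which is why a 16-bit field is enough - the asserts in the setters
// guarantee -brick_size < val < brick_size. A hypothetical round-trip for the
// non-FEATURE_STRUCTALIGN case:
static short example_pack_child_offset (ptrdiff_t val)
{
    // caller guarantees the offset fits in a short and is pointer-aligned
    return (short)val;
}

static ptrdiff_t example_unpack_child_offset (short packed)
{
    return (ptrdiff_t)packed; // sign-extends back to the original offset
}
// ---------------------------------------------------------------------------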
20473 #ifdef FEATURE_STRUCTALIGN
20474 void node_aligninfo (uint8_t* node, int& requiredAlignment, ptrdiff_t& pad)
20476 // Extract the single-number aligninfo from the fields.
20477 short left = ((plug_and_pair*)node)[-1].m_pair.left;
20478 short right = ((plug_and_pair*)node)[-1].m_pair.right;
20479 ptrdiff_t pad_shifted = (pad_from_short(left) << pad_bits) | pad_from_short(right);
20480 ptrdiff_t aligninfo = pad_shifted * DATA_ALIGNMENT;
20482 // Replicate the topmost bit into all lower bits.
20483 ptrdiff_t x = aligninfo;
20489 // Clear all bits but the highest.
20490 requiredAlignment = (int)(x ^ (x >> 1));
20491 pad = aligninfo - requiredAlignment;
20492 pad += AdjustmentForMinPadSize(pad, requiredAlignment);
20496 ptrdiff_t node_alignpad (uint8_t* node)
20498 int requiredAlignment;
20499 ptrdiff_t alignpad;
20500 node_aligninfo (node, requiredAlignment, alignpad);
20504 void clear_node_aligninfo (uint8_t* node)
20506 ((plug_and_pair*)node)[-1].m_pair.left &= ~0 << pad_bits;
20507 ((plug_and_pair*)node)[-1].m_pair.right &= ~0 << pad_bits;
20510 void set_node_aligninfo (uint8_t* node, int requiredAlignment, ptrdiff_t pad)
20512 // Encode the alignment requirement and alignment offset as a single number
20513 // as described above.
20514 ptrdiff_t aligninfo = (size_t)requiredAlignment + (pad & (requiredAlignment-1));
20515 assert (Aligned (aligninfo));
20516 ptrdiff_t aligninfo_shifted = aligninfo / DATA_ALIGNMENT;
20517 assert (aligninfo_shifted < (1 << (pad_bits + pad_bits)));
20519 ptrdiff_t hi = aligninfo_shifted >> pad_bits;
20520 assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.left) == 0);
20521 ((plug_and_pair*)node)[-1].m_pair.left |= hi;
20523 ptrdiff_t lo = aligninfo_shifted & pad_mask;
20524 assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.right) == 0);
20525 ((plug_and_pair*)node)[-1].m_pair.right |= lo;
20528 int requiredAlignment2;
20530 node_aligninfo (node, requiredAlignment2, pad2);
20531 assert (requiredAlignment == requiredAlignment2);
20532 assert (pad == pad2);
20535 #endif // FEATURE_STRUCTALIGN
20538 void loh_set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
20540 ptrdiff_t* place = &(((loh_obj_and_pad*)node)[-1].reloc);
20545 ptrdiff_t loh_node_relocation_distance(uint8_t* node)
20547 return (((loh_obj_and_pad*)node)[-1].reloc);
20551 ptrdiff_t node_relocation_distance (uint8_t* node)
20553 return (((plug_and_reloc*)(node))[-1].reloc & ~3);
20557 void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
20559 assert (val == (val & ~3));
20560 ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
20561 //clear the left bit and the relocation field
20567 #define node_left_p(node) (((plug_and_reloc*)(node))[-1].reloc & 2)
20569 #define set_node_left(node) ((plug_and_reloc*)(node))[-1].reloc |= 2;
20571 #ifndef FEATURE_STRUCTALIGN
20572 void set_node_realigned(uint8_t* node)
20574 ((plug_and_reloc*)(node))[-1].reloc |= 1;
20577 void clear_node_realigned(uint8_t* node)
20579 #ifdef RESPECT_LARGE_ALIGNMENT
20580 ((plug_and_reloc*)(node))[-1].reloc &= ~1;
20582 UNREFERENCED_PARAMETER(node);
20583 #endif //RESPECT_LARGE_ALIGNMENT
20585 #endif // FEATURE_STRUCTALIGN
20588 size_t node_gap_size (uint8_t* node)
20590 return ((plug_and_gap *)node)[-1].gap;
20593 void set_gap_size (uint8_t* node, size_t size)
20595 assert (Aligned (size));
20597 // clear the 2 uint32_t used by the node.
20598 ((plug_and_gap *)node)[-1].reloc = 0;
20599 ((plug_and_gap *)node)[-1].lr =0;
20600 ((plug_and_gap *)node)[-1].gap = size;
20602 assert ((size == 0 )||(size >= sizeof(plug_and_reloc)));
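// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical struct, simplified from plug_and_gap):
// none of the accessors above look inside the plug itself - the gap size,
// relocation word and child links live in the bytes immediately *before* the
// plug, which is what the recurring ((plug_and_gap*)node)[-1] indexing
// expresses:
struct example_plug_header
{
    size_t    gap;   // size of the free gap preceding the plug
    ptrdiff_t reloc; // relocation distance; low bits double as flags
};

static example_plug_header* example_header_of (uint8_t* plug)
{
    // [-1] steps back over one header stored flush against the plug start
    return &(((example_plug_header*)plug)[-1]);
}
// ---------------------------------------------------------------------------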
20606 uint8_t* gc_heap::insert_node (uint8_t* new_node, size_t sequence_number,
20607 uint8_t* tree, uint8_t* last_node)
20609 dprintf (3, ("IN: %Ix(%Ix), T: %Ix(%Ix), L: %Ix(%Ix) [%Ix]",
20610 (size_t)new_node, brick_of(new_node),
20611 (size_t)tree, brick_of(tree),
20612 (size_t)last_node, brick_of(last_node),
20614 if (power_of_two_p (sequence_number))
20616 set_node_left_child (new_node, (tree - new_node));
20617 dprintf (3, ("NT: %Ix, LC->%Ix", (size_t)new_node, (tree - new_node)));
20622 if (oddp (sequence_number))
20624 set_node_right_child (last_node, (new_node - last_node));
20625 dprintf (3, ("%Ix RC->%Ix", last_node, (new_node - last_node)));
20629 uint8_t* earlier_node = tree;
20630 size_t imax = logcount(sequence_number) - 2;
20631 for (size_t i = 0; i != imax; i++)
20633 earlier_node = earlier_node + node_right_child (earlier_node);
20635 int tmp_offset = node_right_child (earlier_node);
20636 assert (tmp_offset); // should never be empty
20637 set_node_left_child (new_node, ((earlier_node + tmp_offset ) - new_node));
20638 set_node_right_child (earlier_node, (new_node - earlier_node));
20640 dprintf (3, ("%Ix LC->%Ix, %Ix RC->%Ix",
20641 new_node, ((earlier_node + tmp_offset ) - new_node),
20642 earlier_node, (new_node - earlier_node)));
20648 size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick,
20649 uint8_t* x, uint8_t* plug_end)
20651 dprintf (3, ("tree: %Ix, current b: %Ix, x: %Ix, plug_end: %Ix",
20652 tree, current_brick, x, plug_end));
20656 dprintf (3, ("b- %Ix->%Ix pointing to tree %Ix",
20657 current_brick, (size_t)(tree - brick_address (current_brick)), tree));
20658 set_brick (current_brick, (tree - brick_address (current_brick)));
20662 dprintf (3, ("b- %Ix->-1", current_brick));
20663 set_brick (current_brick, -1);
20665 size_t b = 1 + current_brick;
20666 ptrdiff_t offset = 0;
20667 size_t last_br = brick_of (plug_end-1);
20668 current_brick = brick_of (x-1);
20669 dprintf (3, ("ubt: %Ix->%Ix]->%Ix]", b, last_br, current_brick));
20670 while (b <= current_brick)
20674 set_brick (b, --offset);
20682 return brick_of (x);
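// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, simplified encoding): what the
// negative entries written by the loop above are for. A brick whose plugs are
// rooted in an earlier brick stores a negative displacement, so a lookup
// walks backwards until it reaches the brick that actually holds a tree,
// mirroring how the real brick lookups chase set_brick (b, --offset) entries:
static size_t example_find_tree_brick (const ptrdiff_t* brick_table, size_t b)
{
    while (brick_table[b] < 0)
    {
        // the entry is a (negative) brick displacement, not a tree offset
        b = (size_t)((ptrdiff_t)b + brick_table[b]);
    }

    return b;
}
// ---------------------------------------------------------------------------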
20685 void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate)
20688 // We should never demote big plugs to gen0.
20689 if (gen == youngest_generation)
20691 heap_segment* seg = ephemeral_heap_segment;
20692 size_t mark_stack_large_bos = mark_stack_bos;
20693 size_t large_plug_pos = 0;
20694 while (mark_stack_large_bos < mark_stack_tos)
20696 if (mark_stack_array[mark_stack_large_bos].len > demotion_plug_len_th)
20698 while (mark_stack_bos <= mark_stack_large_bos)
20700 size_t entry = deque_pinned_plug();
20701 size_t len = pinned_len (pinned_plug_of (entry));
20702 uint8_t* plug = pinned_plug (pinned_plug_of(entry));
20703 if (len > demotion_plug_len_th)
20705 dprintf (2, ("ps(%d): S %Ix (%Id)(%Ix)", gen->gen_num, plug, len, (plug+len)));
20707 pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (consing_gen);
20708 assert(mark_stack_array[entry].len == 0 ||
20709 mark_stack_array[entry].len >= Align(min_obj_size));
20710 generation_allocation_pointer (consing_gen) = plug + len;
20711 generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (seg);
20712 set_allocator_next_pin (consing_gen);
20716 mark_stack_large_bos++;
20721 generation_plan_allocation_start (gen) =
20722 allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1);
20723 generation_plan_allocation_start_size (gen) = Align (min_obj_size);
20724 size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
20725 if (next_plug_to_allocate)
20727 size_t dist_to_next_plug = (size_t)(next_plug_to_allocate - generation_allocation_pointer (consing_gen));
20728 if (allocation_left > dist_to_next_plug)
20730 allocation_left = dist_to_next_plug;
20733 if (allocation_left < Align (min_obj_size))
20735 generation_plan_allocation_start_size (gen) += allocation_left;
20736 generation_allocation_pointer (consing_gen) += allocation_left;
20739 dprintf (2, ("plan alloc gen%d(%Ix) start at %Ix (ptr: %Ix, limit: %Ix, next: %Ix)", gen->gen_num,
20740 generation_plan_allocation_start (gen),
20741 generation_plan_allocation_start_size (gen),
20742 generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen),
20743 next_plug_to_allocate));
20746 void gc_heap::realloc_plan_generation_start (generation* gen, generation* consing_gen)
20748 BOOL adjacentp = FALSE;
20750 generation_plan_allocation_start (gen) =
20751 allocate_in_expanded_heap (consing_gen, Align(min_obj_size), adjacentp, 0,
20754 #endif //SHORT_PLUGS
20755 FALSE, -1 REQD_ALIGN_AND_OFFSET_ARG);
20757 generation_plan_allocation_start_size (gen) = Align (min_obj_size);
20758 size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
20759 if ((allocation_left < Align (min_obj_size)) &&
20760 (generation_allocation_limit (consing_gen)!=heap_segment_plan_allocated (generation_allocation_segment (consing_gen))))
20762 generation_plan_allocation_start_size (gen) += allocation_left;
20763 generation_allocation_pointer (consing_gen) += allocation_left;
20766 dprintf (1, ("plan re-alloc gen%d start at %Ix (ptr: %Ix, limit: %Ix)", gen->gen_num,
20767 generation_plan_allocation_start (consing_gen),
20768 generation_allocation_pointer (consing_gen),
20769 generation_allocation_limit (consing_gen)));
20772 void gc_heap::plan_generation_starts (generation*& consing_gen)
20774 //make sure that every generation has a planned allocation start
20775 int gen_number = settings.condemned_generation;
20776 while (gen_number >= 0)
20778 if (gen_number < max_generation)
20780 consing_gen = ensure_ephemeral_heap_segment (consing_gen);
20782 generation* gen = generation_of (gen_number);
20783 if (0 == generation_plan_allocation_start (gen))
20785 plan_generation_start (gen, consing_gen, 0);
20786 assert (generation_plan_allocation_start (gen));
20790 // now we know the planned allocation size
20791 heap_segment_plan_allocated (ephemeral_heap_segment) =
20792 generation_allocation_pointer (consing_gen);
20795 void gc_heap::advance_pins_for_demotion (generation* gen)
20797 uint8_t* original_youngest_start = generation_allocation_start (youngest_generation);
20798 heap_segment* seg = ephemeral_heap_segment;
20800 if ((!(pinned_plug_que_empty_p())))
20802 size_t gen1_pinned_promoted = generation_pinned_allocation_compact_size (generation_of (max_generation));
20803 size_t gen1_pins_left = dd_pinned_survived_size (dynamic_data_of (max_generation - 1)) - gen1_pinned_promoted;
20804 size_t total_space_to_skip = last_gen1_pin_end - generation_allocation_pointer (gen);
20805 float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip;
20806 float pin_surv_ratio = (float)gen1_pins_left / (float)(dd_survived_size (dynamic_data_of (max_generation - 1)));
20807 if ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30))
20809 while (!pinned_plug_que_empty_p() &&
20810 (pinned_plug (oldest_pin()) < original_youngest_start))
20812 size_t entry = deque_pinned_plug();
20813 size_t len = pinned_len (pinned_plug_of (entry));
20814 uint8_t* plug = pinned_plug (pinned_plug_of(entry));
20815 pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (gen);
20816 assert(mark_stack_array[entry].len == 0 ||
20817 mark_stack_array[entry].len >= Align(min_obj_size));
20818 generation_allocation_pointer (gen) = plug + len;
20819 generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
20820 set_allocator_next_pin (gen);
20822 //Add the size of the pinned plug to the right pinned allocations
20823 //find out which gen this pinned plug came from
20824 int frgn = object_gennum (plug);
20825 if ((frgn != (int)max_generation) && settings.promotion)
20827 int togn = object_gennum_plan (plug);
20828 generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
20831 generation_pinned_allocation_compact_size (generation_of (togn)) += len;
20835 dprintf (2, ("skipping gap %d, pin %Ix (%Id)",
20836 pinned_len (pinned_plug_of (entry)), plug, len));
20839 dprintf (2, ("ad_p_d: PL: %Id, SL: %Id, pfr: %d, psr: %d",
20840 gen1_pins_left, total_space_to_skip, (int)(pin_frag_ratio*100), (int)(pin_surv_ratio*100)));
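// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper): the trigger computed above. Pins
// are advanced past (and thereby demoted) only when the remaining gen1 pinned
// bytes are a large enough share both of the space that would otherwise be
// skipped over and of everything that survived gen1 - the 0.15 and 0.30
// cutoffs used by advance_pins_for_demotion:
static bool example_should_advance_pins (size_t gen1_pins_left,
                                         size_t total_space_to_skip,
                                         size_t gen1_survived_size)
{
    float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip;
    float pin_surv_ratio = (float)gen1_pins_left / (float)gen1_survived_size;

    return ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30));
}
// ---------------------------------------------------------------------------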
20844 void gc_heap::process_ephemeral_boundaries (uint8_t* x,
20845 int& active_new_gen_number,
20846 int& active_old_gen_number,
20847 generation*& consing_gen,
20848 BOOL& allocate_in_condemned)
20851 if ((active_old_gen_number > 0) &&
20852 (x >= generation_allocation_start (generation_of (active_old_gen_number - 1))))
20854 dprintf (2, ("crossing gen%d, x is %Ix", active_old_gen_number - 1, x));
20856 if (!pinned_plug_que_empty_p())
20858 dprintf (2, ("oldest pin: %Ix(%Id)",
20859 pinned_plug (oldest_pin()),
20860 (x - pinned_plug (oldest_pin()))));
20863 if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation))
20865 active_new_gen_number--;
20868 active_old_gen_number--;
20869 assert ((!settings.promotion) || (active_new_gen_number>0));
20871 if (active_new_gen_number == (max_generation - 1))
20873 #ifdef FREE_USAGE_STATS
20874 if (settings.condemned_generation == max_generation)
20876 // We need to do this before we skip the rest of the pinned plugs.
20877 generation* gen_2 = generation_of (max_generation);
20878 generation* gen_1 = generation_of (max_generation - 1);
20880 size_t total_num_pinned_free_spaces_left = 0;
20882 // We are about to allocate gen1, check to see how efficient fitting in gen2 pinned free spaces is.
20883 for (int j = 0; j < NUM_GEN_POWER2; j++)
20885 dprintf (1, ("[h%d][#%Id]2^%d: current: %Id, S: 2: %Id, 1: %Id(%Id)",
20889 gen_2->gen_current_pinned_free_spaces[j],
20890 gen_2->gen_plugs[j], gen_1->gen_plugs[j],
20891 (gen_2->gen_plugs[j] + gen_1->gen_plugs[j])));
20893 total_num_pinned_free_spaces_left += gen_2->gen_current_pinned_free_spaces[j];
20896 float pinned_free_list_efficiency = 0;
20897 size_t total_pinned_free_space = generation_allocated_in_pinned_free (gen_2) + generation_pinned_free_obj_space (gen_2);
20898 if (total_pinned_free_space != 0)
20900 pinned_free_list_efficiency = (float)(generation_allocated_in_pinned_free (gen_2)) / (float)total_pinned_free_space;
20903 dprintf (1, ("[h%d] gen2 allocated %Id bytes with %Id bytes pinned free spaces (effi: %d%%), %Id (%Id) left",
20905 generation_allocated_in_pinned_free (gen_2),
20906 total_pinned_free_space,
20907 (int)(pinned_free_list_efficiency * 100),
20908 generation_pinned_free_obj_space (gen_2),
20909 total_num_pinned_free_spaces_left));
20911 #endif //FREE_USAGE_STATS
20913 //Go past all of the pinned plugs for this generation.
20914 while (!pinned_plug_que_empty_p() &&
20915 (!in_range_for_segment ((pinned_plug (oldest_pin())), ephemeral_heap_segment)))
20917 size_t entry = deque_pinned_plug();
20918 mark* m = pinned_plug_of (entry);
20919 uint8_t* plug = pinned_plug (m);
20920 size_t len = pinned_len (m);
20921 // detect pinned block in different segment (later) than
20922 // allocation segment, skip those until the oldest pin is in the ephemeral seg.
20923 // adjust the allocation segment along the way (at the end it will
20924 // be the ephemeral segment).
20925 heap_segment* nseg = heap_segment_in_range (generation_allocation_segment (consing_gen));
20927 PREFIX_ASSUME(nseg != NULL);
20929 while (!((plug >= generation_allocation_pointer (consing_gen))&&
20930 (plug < heap_segment_allocated (nseg))))
20932 //adjust the end of the segment to be the end of the plug
20933 assert (generation_allocation_pointer (consing_gen)>=
20934 heap_segment_mem (nseg));
20935 assert (generation_allocation_pointer (consing_gen)<=
20936 heap_segment_committed (nseg));
20938 heap_segment_plan_allocated (nseg) =
20939 generation_allocation_pointer (consing_gen);
20940 //switch allocation segment
20941 nseg = heap_segment_next_rw (nseg);
20942 generation_allocation_segment (consing_gen) = nseg;
20943 //reset the allocation pointer and limits
20944 generation_allocation_pointer (consing_gen) =
20945 heap_segment_mem (nseg);
20947 set_new_pin_info (m, generation_allocation_pointer (consing_gen));
20948 assert(pinned_len(m) == 0 || pinned_len(m) >= Align(min_obj_size));
20949 generation_allocation_pointer (consing_gen) = plug + len;
20950 generation_allocation_limit (consing_gen) =
20951 generation_allocation_pointer (consing_gen);
20953 allocate_in_condemned = TRUE;
20954 consing_gen = ensure_ephemeral_heap_segment (consing_gen);
20957 if (active_new_gen_number != max_generation)
20959 if (active_new_gen_number == (max_generation - 1))
20961 maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
20962 if (!demote_gen1_p)
20963 advance_pins_for_demotion (consing_gen);
20966 plan_generation_start (generation_of (active_new_gen_number), consing_gen, x);
20968 dprintf (2, ("process eph: allocated gen%d start at %Ix",
20969 active_new_gen_number,
20970 generation_plan_allocation_start (generation_of (active_new_gen_number))));
20972 if ((demotion_low == MAX_PTR) && !pinned_plug_que_empty_p())
20974 uint8_t* pplug = pinned_plug (oldest_pin());
20975 if (object_gennum (pplug) > 0)
20977 demotion_low = pplug;
20978 dprintf (3, ("process eph: dlow->%Ix", demotion_low));
20982 assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
20990 void gc_heap::seg_clear_mark_bits (heap_segment* seg)
20992 uint8_t* o = heap_segment_mem (seg);
20993 while (o < heap_segment_allocated (seg))
20999 o = o + Align (size (o));
21003 #ifdef FEATURE_BASICFREEZE
21004 void gc_heap::sweep_ro_segments (heap_segment* start_seg)
21006 //go through all of the segments in range and reset the mark bit
21007 //TODO: this works only on small object segments
21009 heap_segment* seg = start_seg;
21013 if (heap_segment_read_only_p (seg) &&
21014 heap_segment_in_range_p (seg))
21016 #ifdef BACKGROUND_GC
21017 if (settings.concurrent)
21019 seg_clear_mark_array_bits_soh (seg);
21023 seg_clear_mark_bits (seg);
21025 #else //BACKGROUND_GC
21028 if(gc_can_use_concurrent)
21030 clear_mark_array (max (heap_segment_mem (seg), lowest_address),
21031 min (heap_segment_allocated (seg), highest_address),
21032 FALSE); // read_only segments need the mark clear
21035 seg_clear_mark_bits (seg);
21036 #endif //MARK_ARRAY
21038 #endif //BACKGROUND_GC
21040 seg = heap_segment_next (seg);
21043 #endif // FEATURE_BASICFREEZE
21045 #ifdef FEATURE_LOH_COMPACTION
21047 BOOL gc_heap::loh_pinned_plug_que_empty_p()
21049 return (loh_pinned_queue_bos == loh_pinned_queue_tos);
21052 void gc_heap::loh_set_allocator_next_pin()
21054 if (!(loh_pinned_plug_que_empty_p()))
21056 mark* oldest_entry = loh_oldest_pin();
21057 uint8_t* plug = pinned_plug (oldest_entry);
21058 generation* gen = large_object_generation;
21059 if ((plug >= generation_allocation_pointer (gen)) &&
21060 (plug < generation_allocation_limit (gen)))
21062 generation_allocation_limit (gen) = pinned_plug (oldest_entry);
21065 assert (!((plug < generation_allocation_pointer (gen)) &&
21066 (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
21070 size_t gc_heap::loh_deque_pinned_plug ()
21072 size_t m = loh_pinned_queue_bos;
21073 loh_pinned_queue_bos++;
21078 mark* gc_heap::loh_pinned_plug_of (size_t bos)
21080 return &loh_pinned_queue[bos];
21084 mark* gc_heap::loh_oldest_pin()
21086 return loh_pinned_plug_of (loh_pinned_queue_bos);
21089 // If we can't grow the queue, then don't compact.
21090 BOOL gc_heap::loh_enque_pinned_plug (uint8_t* plug, size_t len)
21092 assert(len >= Align(min_obj_size, get_alignment_constant (FALSE)));
21094 if (loh_pinned_queue_length <= loh_pinned_queue_tos)
21096 if (!grow_mark_stack (loh_pinned_queue, loh_pinned_queue_length, LOH_PIN_QUEUE_LENGTH))
21101 dprintf (3, (" P: %Ix(%Id)", plug, len));
21102 mark& m = loh_pinned_queue[loh_pinned_queue_tos];
21105 loh_pinned_queue_tos++;
21106 loh_set_allocator_next_pin();
21111 BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit)
21113 dprintf (1235, ("trying to fit %Id(%Id) between %Ix and %Ix (%Id)",
21115 (2* AlignQword (loh_padding_obj_size) + size),
21118 (alloc_limit - alloc_pointer)));
21120 return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) + size) <= alloc_limit);
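// ---------------------------------------------------------------------------
// Worked example with hypothetical numbers: loh_size_fit_p above reserves a
// padding object on each side of the allocation, so a request fits only when
// alloc_pointer + 2*AlignQword (loh_padding_obj_size) + size <= alloc_limit.
// If the aligned pad came to 24 bytes, a 1000-byte window between
// alloc_pointer and alloc_limit could therefore hold at most a
// 1000 - 2*24 = 952 byte object.
// ---------------------------------------------------------------------------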
uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
{
    UNREFERENCED_PARAMETER(old_loc);

    generation* gen = large_object_generation;
    dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id",
        generation_allocation_pointer (gen),
        generation_allocation_limit (gen),
        size));

retry:
    heap_segment* seg = generation_allocation_segment (gen);
    if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen))))
    {
        if ((!(loh_pinned_plug_que_empty_p()) &&
             (generation_allocation_limit (gen) ==
              pinned_plug (loh_oldest_pin()))))
        {
            mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
            size_t len = pinned_len (m);
            uint8_t* plug = pinned_plug (m);
            dprintf (1235, ("AIC: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
            pinned_len (m) = plug - generation_allocation_pointer (gen);
            generation_allocation_pointer (gen) = plug + len;

            generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
            loh_set_allocator_next_pin();
            dprintf (1235, ("s: p: %Ix, l: %Ix (%Id)",
                generation_allocation_pointer (gen),
                generation_allocation_limit (gen),
                (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

            goto retry;
        }

        if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
        {
            generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
            dprintf (1235, ("l->pa(%Ix)", generation_allocation_limit (gen)));
        }
        else
        {
            if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
            {
                heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
                generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                dprintf (1235, ("l->c(%Ix)", generation_allocation_limit (gen)));
            }
            else
            {
                if (loh_size_fit_p (size, generation_allocation_pointer (gen), heap_segment_reserved (seg)) &&
                    (grow_heap_segment (seg, (generation_allocation_pointer (gen) + size + 2* AlignQword (loh_padding_obj_size)))))
                {
                    dprintf (1235, ("growing seg from %Ix to %Ix\n", heap_segment_committed (seg),
                                     (generation_allocation_pointer (gen) + size)));

                    heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
                    generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);

                    dprintf (1235, ("g: p: %Ix, l: %Ix (%Id)",
                        generation_allocation_pointer (gen),
                        generation_allocation_limit (gen),
                        (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
                }
                else
                {
                    heap_segment* next_seg = heap_segment_next (seg);
                    assert (generation_allocation_pointer (gen)>=
                            heap_segment_mem (seg));
                    // Verify that all pinned plugs for this segment are consumed
                    if (!loh_pinned_plug_que_empty_p() &&
                        ((pinned_plug (loh_oldest_pin()) <
                          heap_segment_allocated (seg)) &&
                         (pinned_plug (loh_oldest_pin()) >=
                          generation_allocation_pointer (gen))))
                    {
                        LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
                             pinned_plug (loh_oldest_pin())));
                        dprintf (1236, ("queue empty: %d", loh_pinned_plug_que_empty_p()));
                        FATAL_GC_ERROR();
                    }

                    assert (generation_allocation_pointer (gen)>=
                            heap_segment_mem (seg));
                    assert (generation_allocation_pointer (gen)<=
                            heap_segment_committed (seg));
                    heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);

                    if (next_seg)
                    {
                        // for LOH do we want to try starting from the first LOH every time though?
                        generation_allocation_segment (gen) = next_seg;
                        generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
                        generation_allocation_limit (gen) = generation_allocation_pointer (gen);

                        dprintf (1235, ("n: p: %Ix, l: %Ix (%Id)",
                            generation_allocation_pointer (gen),
                            generation_allocation_limit (gen),
                            (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
                    }
                    else
                    {
                        dprintf (1, ("We ran out of space compacting, shouldn't happen"));
                        FATAL_GC_ERROR();
                    }
                }
            }
        }
        loh_set_allocator_next_pin();

        dprintf (1235, ("r: p: %Ix, l: %Ix (%Id)",
            generation_allocation_pointer (gen),
            generation_allocation_limit (gen),
            (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

        goto retry;
    }

    assert (generation_allocation_pointer (gen)>=
            heap_segment_mem (generation_allocation_segment (gen)));
    uint8_t* result = generation_allocation_pointer (gen);
    size_t loh_pad = AlignQword (loh_padding_obj_size);

    generation_allocation_pointer (gen) += size + loh_pad;
    assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));

    dprintf (1235, ("p: %Ix, l: %Ix (%Id)",
        generation_allocation_pointer (gen),
        generation_allocation_limit (gen),
        (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

    assert (result + loh_pad);
    return result + loh_pad;
}
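
// Note (added): loh_allocate_in_condemned is a bump allocator wrapped in a
// retry loop. Each pass either fits the request between the allocation
// pointer and limit, or makes progress by one of: consuming the oldest
// pinned plug that currently caps the limit, raising the limit to the
// segment's plan-allocated or committed boundary, committing more of the
// segment, or moving to the next segment. Exhausting all segments while
// compacting is treated as fatal, since compaction only slides live objects
// toward lower addresses within existing segments.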
BOOL gc_heap::loh_compaction_requested()
{
    // If hard limit is specified GC will automatically decide if LOH needs to be compacted.
    return (loh_compaction_always_p || (loh_compaction_mode != loh_compaction_default));
}
void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p)
{
    if (settings.loh_compaction && (loh_compaction_mode == loh_compaction_once))
    {
        if (all_heaps_compacted_p)
        {
            // If the compaction mode says to compact once and we are going to compact LOH,
            // we need to revert it back to no compaction.
            loh_compaction_mode = loh_compaction_default;
        }
    }
}
BOOL gc_heap::plan_loh()
{
    if (!loh_pinned_queue)
    {
        loh_pinned_queue = new (nothrow) (mark [LOH_PIN_QUEUE_LENGTH]);
        if (!loh_pinned_queue)
        {
            dprintf (1, ("Cannot allocate the LOH pinned queue (%Id bytes), no compaction",
                         LOH_PIN_QUEUE_LENGTH * sizeof (mark)));
            return FALSE;
        }

        loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH;
    }

    if (heap_number == 0)
        loh_pinned_queue_decay = LOH_PIN_DECAY;

    loh_pinned_queue_tos = 0;
    loh_pinned_queue_bos = 0;

    generation* gen = large_object_generation;
    heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
    PREFIX_ASSUME(start_seg != NULL);
    heap_segment* seg = start_seg;
    uint8_t* o = generation_allocation_start (gen);

    dprintf (1235, ("before GC LOH size: %Id, free list: %Id, free obj: %Id\n",
        generation_size (max_generation + 1),
        generation_free_list_space (gen),
        generation_free_obj_space (gen)));

    while (seg)
    {
        heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
        seg = heap_segment_next (seg);
    }

    seg = start_seg;

    //Skip the generation gap object
    o = o + AlignQword (size (o));
    // We don't need to ever realloc gen3 start so don't touch it.
    heap_segment_plan_allocated (seg) = o;
    generation_allocation_pointer (gen) = o;
    generation_allocation_limit (gen) = generation_allocation_pointer (gen);
    generation_allocation_segment (gen) = start_seg;

    uint8_t* free_space_start = o;
    uint8_t* free_space_end = o;
    uint8_t* new_address = 0;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
            {
                break;
            }

            o = heap_segment_mem (seg);
        }

        if (marked (o))
        {
            free_space_end = o;
            size_t size = AlignQword (size (o));
            dprintf (1235, ("%Ix(%Id) M", o, size));

            if (pinned (o))
            {
                // We don't clear the pinned bit yet so we can check in
                // compact phase how big a free object we should allocate
                // in front of the pinned object. We use the reloc address
                // field to store this.
                if (!loh_enque_pinned_plug (o, size))
                {
                    return FALSE;
                }
                new_address = o;
            }
            else
            {
                new_address = loh_allocate_in_condemned (o, size);
            }

            loh_set_node_relocation_distance (o, (new_address - o));
            dprintf (1235, ("lobj %Ix-%Ix -> %Ix-%Ix (%Id)", o, (o + size), new_address, (new_address + size), (new_address - o)));

            o = o + size;
            free_space_start = o;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0)));
                o = o + AlignQword (size (o));
            }
        }
    }

    while (!loh_pinned_plug_que_empty_p())
    {
        mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
        size_t len = pinned_len (m);
        uint8_t* plug = pinned_plug (m);

        // detect pinned block in different segment (later) than
        // allocation segment
        heap_segment* nseg = heap_segment_rw (generation_allocation_segment (gen));

        while ((plug < generation_allocation_pointer (gen)) ||
               (plug >= heap_segment_allocated (nseg)))
        {
            assert ((plug < heap_segment_mem (nseg)) ||
                    (plug > heap_segment_reserved (nseg)));
            //adjust the end of the segment to be the end of the plug
            assert (generation_allocation_pointer (gen)>=
                    heap_segment_mem (nseg));
            assert (generation_allocation_pointer (gen)<=
                    heap_segment_committed (nseg));

            heap_segment_plan_allocated (nseg) =
                generation_allocation_pointer (gen);
            //switch allocation segment
            nseg = heap_segment_next_rw (nseg);
            generation_allocation_segment (gen) = nseg;
            //reset the allocation pointer and limits
            generation_allocation_pointer (gen) =
                heap_segment_mem (nseg);
        }

        dprintf (1235, ("SP: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
        pinned_len (m) = plug - generation_allocation_pointer (gen);
        generation_allocation_pointer (gen) = plug + len;
    }

    heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
    generation_allocation_pointer (gen) = 0;
    generation_allocation_limit (gen) = 0;

    return TRUE;
}
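
// Note (added): plan_loh makes one address-ordered pass over the LOH:
// pinned objects are enqueued (they will not move) and every other live
// object gets a forward slide target from loh_allocate_in_condemned, with
// the relocation distance stashed in the object's node. The drain loop at
// the end records, for each remaining pin, the free gap in front of it
// (pinned_len) so compact_loh can later turn that gap into a free object.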
void gc_heap::compact_loh()
{
    assert (loh_compaction_requested() || heap_hard_limit);

    generation* gen = large_object_generation;
    heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
    PREFIX_ASSUME(start_seg != NULL);
    heap_segment* seg = start_seg;
    heap_segment* prev_seg = 0;
    uint8_t* o = generation_allocation_start (gen);

    //Skip the generation gap object
    o = o + AlignQword (size (o));
    // We don't need to ever realloc gen3 start so don't touch it.
    uint8_t* free_space_start = o;
    uint8_t* free_space_end = o;
    generation_allocator (gen)->clear();
    generation_free_list_space (gen) = 0;
    generation_free_obj_space (gen) = 0;

    loh_pinned_queue_bos = 0;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            heap_segment* next_seg = heap_segment_next (seg);

            if ((heap_segment_plan_allocated (seg) == heap_segment_mem (seg)) &&
                (seg != start_seg) && !heap_segment_read_only_p (seg))
            {
                dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
                assert (prev_seg);
                heap_segment_next (prev_seg) = next_seg;
                heap_segment_next (seg) = freeable_large_heap_segment;
                freeable_large_heap_segment = seg;
            }
            else
            {
                if (!heap_segment_read_only_p (seg))
                {
                    // We grew the segment to accommodate allocations.
                    if (heap_segment_plan_allocated (seg) > heap_segment_allocated (seg))
                    {
                        if ((heap_segment_plan_allocated (seg) - plug_skew) > heap_segment_used (seg))
                        {
                            heap_segment_used (seg) = heap_segment_plan_allocated (seg) - plug_skew;
                        }
                    }

                    heap_segment_allocated (seg) = heap_segment_plan_allocated (seg);
                    dprintf (3, ("Trimming seg to %Ix[", heap_segment_allocated (seg)));
                    decommit_heap_segment_pages (seg, 0);
                    dprintf (1236, ("CLOH: seg: %Ix, alloc: %Ix, used: %Ix, committed: %Ix",
                        (size_t)seg,
                        heap_segment_allocated (seg),
                        heap_segment_used (seg),
                        heap_segment_committed (seg)));
                    //heap_segment_used (seg) = heap_segment_allocated (seg) - plug_skew;
                    dprintf (1236, ("CLOH: used is set to %Ix", heap_segment_used (seg)));
                }
                prev_seg = seg;
            }

            seg = next_seg;
            if (seg == 0)
                break;
            else
            {
                o = heap_segment_mem (seg);
            }
        }

        if (marked (o))
        {
            free_space_end = o;
            size_t size = AlignQword (size (o));

            size_t loh_pad;
            uint8_t* reloc = o;
            if (pinned (o))
            {
                // We are relying on the fact the pinned objects are always looked at in the same order
                // in plan phase and in compact phase.
                mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
                uint8_t* plug = pinned_plug (m);
                assert (plug == o);

                loh_pad = pinned_len (m);
                clear_pinned (o);
            }
            else
            {
                loh_pad = AlignQword (loh_padding_obj_size);

                reloc += loh_node_relocation_distance (o);
                gcmemcopy (reloc, o, size, TRUE);
            }

            thread_gap ((reloc - loh_pad), loh_pad, gen);

            o = o + size;
            free_space_start = o;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                o = o + AlignQword (size (o));
            }
        }
    }

    assert (loh_pinned_plug_que_empty_p());

    dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n",
        generation_size (max_generation + 1),
        generation_free_list_space (gen),
        generation_free_obj_space (gen)));
}
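
// Note (added): compact_loh replays exactly the walk plan_loh made, which is
// why it can simply reset loh_pinned_queue_bos to 0 and dequeue pins in the
// same order instead of re-planning. For a relocated object the pad in front
// of its new location becomes a free-list entry via thread_gap; for a pinned
// object the gap recorded in pinned_len is threaded instead.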
void gc_heap::relocate_in_loh_compact()
{
    generation* gen = large_object_generation;
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
    uint8_t* o = generation_allocation_start (gen);

    //Skip the generation gap object
    o = o + AlignQword (size (o));

    relocate_args args;
    args.low = gc_low;
    args.high = gc_high;
    args.last_plug = 0;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
            {
                break;
            }

            o = heap_segment_mem (seg);
        }

        if (marked (o))
        {
            size_t size = AlignQword (size (o));

            check_class_object_demotion (o);
            if (contain_pointers (o))
            {
                go_through_object_nostart (method_table (o), o, size(o), pval,
                {
                    reloc_survivor_helper (pval);
                });
            }

            o = o + size;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                o = o + AlignQword (size (o));
            }
        }
    }

    dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n",
        generation_size (max_generation + 1),
        generation_free_list_space (gen),
        generation_free_obj_space (gen)));
}
void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
{
    generation* gen = large_object_generation;
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
    uint8_t* o = generation_allocation_start (gen);

    //Skip the generation gap object
    o = o + AlignQword (size (o));

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
            {
                break;
            }

            o = heap_segment_mem (seg);
        }

        if (marked (o))
        {
            size_t size = AlignQword (size (o));

            ptrdiff_t reloc = loh_node_relocation_distance (o);

            STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);

            fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);

            o = o + size;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                o = o + AlignQword (size (o));
            }
        }
    }
}
BOOL gc_heap::loh_object_p (uint8_t* o)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps [0];
    int brick_entry = hp->brick_table[hp->brick_of (o)];
#else //MULTIPLE_HEAPS
    int brick_entry = brick_table[brick_of (o)];
#endif //MULTIPLE_HEAPS

    return (brick_entry == 0);
}
#endif //FEATURE_LOH_COMPACTION
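
// Note (added, on loh_object_p above): brick entries are only maintained for
// the small object heap, where the plan phase records plug tree offsets. A
// zero entry therefore appears to be used here as "no SOH plug covers this
// address", i.e. the object lives on the large object heap.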
void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p,
                                      BOOL& last_pinned_plug_p,
                                      BOOL& pinned_plug_p,
                                      size_t ps,
                                      size_t& artificial_pinned_size)
{
    last_npinned_plug_p = FALSE;
    last_pinned_plug_p = TRUE;
    pinned_plug_p = TRUE;
    artificial_pinned_size = ps;
}
// Because we have the artificial pinning, we can't guarantee that pinned and npinned
// plugs are always interleaved.
void gc_heap::store_plug_gap_info (uint8_t* plug_start,
                                   uint8_t* plug_end,
                                   BOOL& last_npinned_plug_p,
                                   BOOL& last_pinned_plug_p,
                                   uint8_t*& last_pinned_plug,
                                   BOOL& pinned_plug_p,
                                   uint8_t* last_object_in_last_plug,
                                   BOOL& merge_with_last_pin_p,
                                   // this is only for verification purpose
                                   size_t last_plug_len)
{
    UNREFERENCED_PARAMETER(last_plug_len);

    if (!last_npinned_plug_p && !last_pinned_plug_p)
    {
        //dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start));
        dprintf (3, ("Free: %Ix", (plug_start - plug_end)));
        assert ((plug_start == plug_end) || ((size_t)(plug_start - plug_end) >= Align (min_obj_size)));
        set_gap_size (plug_start, plug_start - plug_end);
    }

    if (pinned (plug_start))
    {
        BOOL save_pre_plug_info_p = FALSE;

        if (last_npinned_plug_p || last_pinned_plug_p)
        {
            //if (last_plug_len == Align (min_obj_size))
            //{
            //    dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct"));
            //    GCToOSInterface::DebugBreak();
            //}
            save_pre_plug_info_p = TRUE;
        }

        pinned_plug_p = TRUE;
        last_npinned_plug_p = FALSE;

        if (last_pinned_plug_p)
        {
            dprintf (3, ("last plug %Ix was also pinned, should merge", last_pinned_plug));
            merge_with_last_pin_p = TRUE;
        }
        else
        {
            last_pinned_plug_p = TRUE;
            last_pinned_plug = plug_start;

            enque_pinned_plug (last_pinned_plug, save_pre_plug_info_p, last_object_in_last_plug);

            if (save_pre_plug_info_p)
            {
                set_gap_size (plug_start, sizeof (gap_reloc_pair));
            }
        }
    }
    else
    {
        if (last_pinned_plug_p)
        {
            //if (Align (last_plug_len) < min_pre_pin_obj_size)
            //{
            //    dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct"));
            //    GCToOSInterface::DebugBreak();
            //}

            save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start);
            set_gap_size (plug_start, sizeof (gap_reloc_pair));
        }

        verify_pins_with_post_plug_info("after saving post plug info");

        last_npinned_plug_p = TRUE;
        last_pinned_plug_p = FALSE;
    }
}
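
// Note (added): store_plug_gap_info handles three transitions while walking
// plugs: after a gap between two plugs it records the gap size in front of
// the new plug; on entering a pinned plug it saves the pre-plug words that
// writing the gap info would otherwise clobber (gap_reloc_pair), unless the
// plug merges into a preceding pinned plug; and on leaving a pinned plug for
// a non-pinned one it saves the post-plug words the same way.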
void gc_heap::record_interesting_data_point (interesting_data_point idp)
{
#ifdef GC_CONFIG_DRIVEN
    (interesting_data_per_gc[idp])++;
#else
    UNREFERENCED_PARAMETER(idp);
#endif //GC_CONFIG_DRIVEN
}
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
#endif //_PREFAST_
void gc_heap::plan_phase (int condemned_gen_number)
{
    size_t old_gen2_allocated = 0;
    size_t old_gen2_size = 0;

    if (condemned_gen_number == (max_generation - 1))
    {
        old_gen2_allocated = generation_free_list_allocated (generation_of (max_generation));
        old_gen2_size = generation_size (max_generation);
    }

    assert (settings.concurrent == FALSE);

    // %type%  category = quote (plan);
#ifdef TIME_GC
    unsigned start;
    unsigned finish;
    start = GetCycleCount32();
#endif //TIME_GC

    dprintf (2,("---- Plan Phase ---- Condemned generation %d, promotion: %d",
                condemned_gen_number, settings.promotion ? 1 : 0));

    generation*  condemned_gen1 = generation_of (condemned_gen_number);

#ifdef MARK_LIST
    BOOL use_mark_list = FALSE;
    uint8_t** mark_list_next = &mark_list[0];
#ifdef GC_CONFIG_DRIVEN
    dprintf (3, ("total number of marked objects: %Id (%Id)",
                 (mark_list_index - &mark_list[0]), ((mark_list_end - &mark_list[0]))));

    if (mark_list_index >= (mark_list_end + 1))
        mark_list_index = mark_list_end + 1;
#else
    dprintf (3, ("mark_list length: %Id",
                 (mark_list_index - &mark_list[0])));
#endif //GC_CONFIG_DRIVEN

    if ((condemned_gen_number < max_generation) &&
        (mark_list_index <= mark_list_end)
#ifdef BACKGROUND_GC
        && (!recursive_gc_sync::background_running_p())
#endif //BACKGROUND_GC
        )
    {
#ifndef MULTIPLE_HEAPS
        _sort (&mark_list[0], mark_list_index-1, 0);
        //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0)));
        //verify_qsort_array (&mark_list[0], mark_list_index-1);
#endif //!MULTIPLE_HEAPS
        use_mark_list = TRUE;
        get_gc_data_per_heap()->set_mechanism_bit (gc_mark_list_bit);
    }
    else
    {
        dprintf (3, ("mark_list not used"));
    }
#endif //MARK_LIST
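
    // Note (added): with a sorted mark list the plan loop below can jump
    // straight from one marked object to the next instead of walking every
    // object in the condemned range, which pays off when survivors are
    // sparse. The list is only trusted when it did not overflow
    // (mark_list_index <= mark_list_end).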
#ifdef FEATURE_BASICFREEZE
    if ((generation_start_segment (condemned_gen1) != ephemeral_heap_segment) &&
        ro_segments_in_range)
    {
        sweep_ro_segments (generation_start_segment (condemned_gen1));
    }
#endif // FEATURE_BASICFREEZE
#ifndef MULTIPLE_HEAPS
    if (shigh != (uint8_t*)0)
    {
        heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));

        PREFIX_ASSUME(seg != NULL);

        heap_segment* fseg = seg;
        do
        {
            if (slow > heap_segment_mem (seg) &&
                slow < heap_segment_reserved (seg))
            {
                if (seg == fseg)
                {
                    uint8_t* o = generation_allocation_start (condemned_gen1) +
                        Align (size (generation_allocation_start (condemned_gen1)));
                    if (slow > o)
                    {
                        assert ((slow - o) >= (int)Align (min_obj_size));
#ifdef BACKGROUND_GC
                        if (current_c_gc_state == c_gc_state_marking)
                        {
                            bgc_clear_batch_mark_array_bits (o, slow);
                        }
#endif //BACKGROUND_GC
                        make_unused_array (o, slow - o);
                    }
                }
                else
                {
                    assert (condemned_gen_number == max_generation);
                    make_unused_array (heap_segment_mem (seg),
                                       slow - heap_segment_mem (seg));
                }
            }
            if (in_range_for_segment (shigh, seg))
            {
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    bgc_clear_batch_mark_array_bits ((shigh + Align (size (shigh))), heap_segment_allocated (seg));
                }
#endif //BACKGROUND_GC
                heap_segment_allocated (seg) = shigh + Align (size (shigh));
            }
            // test if the segment is in the range of [slow, shigh]
            if (!((heap_segment_reserved (seg) >= slow) &&
                  (heap_segment_mem (seg) <= shigh)))
            {
                // shorten it to minimum
                heap_segment_allocated (seg) = heap_segment_mem (seg);
            }
            seg = heap_segment_next_rw (seg);
        } while (seg);
    }
    else
    {
        heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));

        PREFIX_ASSUME(seg != NULL);

        heap_segment* sseg = seg;
        do
        {
            // shorten it to minimum
            if (seg == sseg)
            {
                // no survivors make all generations look empty
                uint8_t* o = generation_allocation_start (condemned_gen1) +
                    Align (size (generation_allocation_start (condemned_gen1)));
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    bgc_clear_batch_mark_array_bits (o, heap_segment_allocated (seg));
                }
#endif //BACKGROUND_GC
                heap_segment_allocated (seg) = o;
            }
            else
            {
                assert (condemned_gen_number == max_generation);
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    bgc_clear_batch_mark_array_bits (heap_segment_mem (seg), heap_segment_allocated (seg));
                }
#endif //BACKGROUND_GC
                heap_segment_allocated (seg) = heap_segment_mem (seg);
            }
            seg = heap_segment_next_rw (seg);
        } while (seg);
    }
#endif //MULTIPLE_HEAPS
    heap_segment*  seg1 = heap_segment_rw (generation_start_segment (condemned_gen1));

    PREFIX_ASSUME(seg1 != NULL);

    uint8_t*  end = heap_segment_allocated (seg1);
    uint8_t*  first_condemned_address = generation_allocation_start (condemned_gen1);
    uint8_t*  x = first_condemned_address;

    assert (!marked (x));
    uint8_t*  plug_end = x;
    uint8_t*  tree = 0;
    size_t  sequence_number = 0;
    uint8_t*  last_node = 0;
    size_t  current_brick = brick_of (x);
    BOOL  allocate_in_condemned = ((condemned_gen_number == max_generation)||
                                   (settings.promotion == FALSE));
    int  active_old_gen_number = condemned_gen_number;
    int  active_new_gen_number = (allocate_in_condemned ? condemned_gen_number:
                                  (1 + condemned_gen_number));
    generation*  older_gen = 0;
    generation* consing_gen = condemned_gen1;
    alloc_list  r_free_list [MAX_BUCKET_COUNT];

    size_t r_free_list_space = 0;
    size_t r_free_obj_space = 0;
    size_t r_older_gen_free_list_allocated = 0;
    size_t r_older_gen_condemned_allocated = 0;
    size_t r_older_gen_end_seg_allocated = 0;
    uint8_t*  r_allocation_pointer = 0;
    uint8_t*  r_allocation_limit = 0;
    uint8_t* r_allocation_start_region = 0;
    heap_segment*  r_allocation_segment = 0;
#ifdef FREE_USAGE_STATS
    size_t r_older_gen_free_space[NUM_GEN_POWER2];
#endif //FREE_USAGE_STATS
    if ((condemned_gen_number < max_generation))
    {
        older_gen = generation_of (min (max_generation, 1 + condemned_gen_number));
        generation_allocator (older_gen)->copy_to_alloc_list (r_free_list);

        r_free_list_space = generation_free_list_space (older_gen);
        r_free_obj_space = generation_free_obj_space (older_gen);
#ifdef FREE_USAGE_STATS
        memcpy (r_older_gen_free_space, older_gen->gen_free_spaces, sizeof (r_older_gen_free_space));
#endif //FREE_USAGE_STATS
        generation_allocate_end_seg_p (older_gen) = FALSE;
        r_older_gen_free_list_allocated = generation_free_list_allocated (older_gen);
        r_older_gen_condemned_allocated = generation_condemned_allocated (older_gen);
        r_older_gen_end_seg_allocated = generation_end_seg_allocated (older_gen);
        r_allocation_limit = generation_allocation_limit (older_gen);
        r_allocation_pointer = generation_allocation_pointer (older_gen);
        r_allocation_start_region = generation_allocation_context_start_region (older_gen);
        r_allocation_segment = generation_allocation_segment (older_gen);
        heap_segment* start_seg = heap_segment_rw (generation_start_segment (older_gen));

        PREFIX_ASSUME(start_seg != NULL);

        if (start_seg != ephemeral_heap_segment)
        {
            assert (condemned_gen_number == (max_generation - 1));
            while (start_seg && (start_seg != ephemeral_heap_segment))
            {
                assert (heap_segment_allocated (start_seg) >=
                        heap_segment_mem (start_seg));
                assert (heap_segment_allocated (start_seg) <=
                        heap_segment_reserved (start_seg));
                heap_segment_plan_allocated (start_seg) =
                    heap_segment_allocated (start_seg);
                start_seg = heap_segment_next_rw (start_seg);
            }
        }
    }
    //reset all of the segment allocated sizes
    {
        heap_segment*  seg2 = heap_segment_rw (generation_start_segment (condemned_gen1));

        PREFIX_ASSUME(seg2 != NULL);

        while (seg2)
        {
            heap_segment_plan_allocated (seg2) =
                heap_segment_mem (seg2);
            seg2 = heap_segment_next_rw (seg2);
        }
    }
    int  condemned_gn = condemned_gen_number;

    int bottom_gen = 0;
    init_free_and_plug();

    while (condemned_gn >= bottom_gen)
    {
        generation*  condemned_gen2 = generation_of (condemned_gn);
        generation_allocator (condemned_gen2)->clear();
        generation_free_list_space (condemned_gen2) = 0;
        generation_free_obj_space (condemned_gen2) = 0;
        generation_allocation_size (condemned_gen2) = 0;
        generation_condemned_allocated (condemned_gen2) = 0;
        generation_pinned_allocated (condemned_gen2) = 0;
        generation_free_list_allocated(condemned_gen2) = 0;
        generation_end_seg_allocated (condemned_gen2) = 0;
        generation_pinned_allocation_sweep_size (condemned_gen2) = 0;
        generation_pinned_allocation_compact_size (condemned_gen2) = 0;
#ifdef FREE_USAGE_STATS
        generation_pinned_free_obj_space (condemned_gen2) = 0;
        generation_allocated_in_pinned_free (condemned_gen2) = 0;
        generation_allocated_since_last_pin (condemned_gen2) = 0;
#endif //FREE_USAGE_STATS
        generation_plan_allocation_start (condemned_gen2) = 0;
        generation_allocation_segment (condemned_gen2) =
            heap_segment_rw (generation_start_segment (condemned_gen2));

        PREFIX_ASSUME(generation_allocation_segment(condemned_gen2) != NULL);

        if (generation_start_segment (condemned_gen2) != ephemeral_heap_segment)
        {
            generation_allocation_pointer (condemned_gen2) =
                heap_segment_mem (generation_allocation_segment (condemned_gen2));
        }
        else
        {
            generation_allocation_pointer (condemned_gen2) = generation_allocation_start (condemned_gen2);
        }

        generation_allocation_limit (condemned_gen2) = generation_allocation_pointer (condemned_gen2);
        generation_allocation_context_start_region (condemned_gen2) = generation_allocation_pointer (condemned_gen2);

        condemned_gn--;
    }
    BOOL allocate_first_generation_start = FALSE;

    if (allocate_in_condemned)
    {
        allocate_first_generation_start = TRUE;
    }

    dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));

    demotion_low = MAX_PTR;
    demotion_high = heap_segment_allocated (ephemeral_heap_segment);

    // If we are doing a gen1 only because of cards, it means we should not demote any pinned plugs
    // from gen1. They should get promoted to gen2.
    demote_gen1_p = !(settings.promotion &&
                      (settings.condemned_generation == (max_generation - 1)) &&
                      gen_to_condemn_reasons.is_only_condition (gen_low_card_p));

    total_ephemeral_size = 0;

    print_free_and_plug ("BP");

    for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
    {
        generation* temp_gen = generation_of (gen_idx);

        dprintf (2, ("gen%d start %Ix, plan start %Ix",
                     gen_idx,
                     generation_allocation_start (temp_gen),
                     generation_plan_allocation_start (temp_gen)));
    }

    BOOL fire_pinned_plug_events_p = EVENT_ENABLED(PinPlugAtGCTime);
    size_t last_plug_len = 0;

    while (1)
    {
        if (x >= end)
        {
            assert (x == end);
            assert (heap_segment_allocated (seg1) == end);
            heap_segment_allocated (seg1) = plug_end;

            current_brick = update_brick_table (tree, current_brick, x, plug_end);
            dprintf (3, ("end of seg: new tree, sequence# 0"));
            sequence_number = 0;
            tree = 0;

            if (heap_segment_next_rw (seg1))
            {
                seg1 = heap_segment_next_rw (seg1);
                end = heap_segment_allocated (seg1);
                plug_end = x = heap_segment_mem (seg1);
                current_brick = brick_of (x);
                dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));
                continue;
            }
            else
            {
                break;
            }
        }
        BOOL last_npinned_plug_p = FALSE;
        BOOL last_pinned_plug_p = FALSE;

        // last_pinned_plug is the beginning of the last pinned plug. If we merge a plug into a pinned
        // plug we do not change the value of last_pinned_plug. This happens with artificially pinned plugs -
        // it can be merged with a previous pinned plug and a pinned plug after it can be merged with it.
        uint8_t* last_pinned_plug = 0;
        size_t num_pinned_plugs_in_plug = 0;

        uint8_t* last_object_in_plug = 0;
        while ((x < end) && marked (x))
        {
            uint8_t*  plug_start = x;
            uint8_t*  saved_plug_end = plug_end;
            BOOL   pinned_plug_p = FALSE;
            BOOL   npin_before_pin_p = FALSE;
            BOOL   saved_last_npinned_plug_p = last_npinned_plug_p;
            uint8_t*  saved_last_object_in_plug = last_object_in_plug;
            BOOL   merge_with_last_pin_p = FALSE;

            size_t added_pinning_size = 0;
            size_t artificial_pinned_size = 0;

            store_plug_gap_info (plug_start, plug_end, last_npinned_plug_p, last_pinned_plug_p,
                                 last_pinned_plug, pinned_plug_p, last_object_in_plug,
                                 merge_with_last_pin_p, last_plug_len);

#ifdef FEATURE_STRUCTALIGN
            int requiredAlignment = ((CObjectHeader*)plug_start)->GetRequiredAlignment();
            size_t alignmentOffset = OBJECT_ALIGNMENT_OFFSET;
#endif // FEATURE_STRUCTALIGN

            {
                uint8_t* xl = x;
                while ((xl < end) && marked (xl) && (pinned (xl) == pinned_plug_p))
                {
                    assert (xl < end);
                    if (pinned (xl))
                    {
                        clear_pinned (xl);
                    }
#ifdef FEATURE_STRUCTALIGN
                    else
                    {
                        int obj_requiredAlignment = ((CObjectHeader*)xl)->GetRequiredAlignment();
                        if (obj_requiredAlignment > requiredAlignment)
                        {
                            requiredAlignment = obj_requiredAlignment;
                            alignmentOffset = xl - plug_start + OBJECT_ALIGNMENT_OFFSET;
                        }
                    }
#endif // FEATURE_STRUCTALIGN

                    clear_marked (xl);

                    dprintf(4, ("+%Ix+", (size_t)xl));
                    assert ((size (xl) > 0));
                    assert ((size (xl) <= loh_size_threshold));

                    last_object_in_plug = xl;

                    xl = xl + Align (size (xl));
                    Prefetch (xl);
                }

                BOOL next_object_marked_p = ((xl < end) && marked (xl));

                if (pinned_plug_p)
                {
                    // If it is pinned we need to extend to the next marked object as we can't use part of
                    // a pinned object to make the artificial gap (unless the last 3 ptr sized words are all
                    // references but for now I am just using the next non pinned object for that).
                    if (next_object_marked_p)
                    {
                        clear_marked (xl);
                        last_object_in_plug = xl;
                        size_t extra_size = Align (size (xl));
                        xl = xl + extra_size;
                        added_pinning_size = extra_size;
                    }
                }
                else
                {
                    if (next_object_marked_p)
                        npin_before_pin_p = TRUE;
                }

                assert (xl <= end);
                x = xl;
            }
            dprintf (3, ( "%Ix[", (size_t)x));
            plug_end = x;
            size_t ps = plug_end - plug_start;
            last_plug_len = ps;
            dprintf (3, ( "%Ix[(%Ix)", (size_t)x, ps));
            uint8_t*  new_address = 0;
            if (!pinned_plug_p)
            {
                if (allocate_in_condemned &&
                    (settings.condemned_generation == max_generation) &&
                    (ps > OS_PAGE_SIZE))
                {
                    ptrdiff_t reloc = plug_start - generation_allocation_pointer (consing_gen);
                    //reloc should be >= 0 except when we relocate
                    //across segments and the dest seg is higher than the src

                    if ((ps > (8*OS_PAGE_SIZE)) &&
                        (reloc > 0) &&
                        ((size_t)reloc < (ps/16)))
                    {
                        dprintf (3, ("Pinning %Ix; reloc would have been: %Ix",
                                     (size_t)plug_start, reloc));
                        // The last plug couldn't have been a npinned plug or it would have
                        // included this plug.
                        assert (!saved_last_npinned_plug_p);

                        if (last_pinned_plug)
                        {
                            dprintf (3, ("artificially pinned plug merged with last pinned plug"));
                            merge_with_last_pin_p = TRUE;
                        }
                        else
                        {
                            enque_pinned_plug (plug_start, FALSE, 0);
                            last_pinned_plug = plug_start;
                        }

                        convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
                                                ps, artificial_pinned_size);
                    }
                }
            }
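
            // Note (added): the block above is the "artificial pinning"
            // heuristic: a plug larger than 8 pages that would only slide
            // down by less than 1/16th of its own size is pinned in place,
            // since copying that much memory would buy back almost no space.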
            if (allocate_first_generation_start)
            {
                allocate_first_generation_start = FALSE;
                plan_generation_start (condemned_gen1, consing_gen, plug_start);
                assert (generation_plan_allocation_start (condemned_gen1));
            }

            if (seg1 == ephemeral_heap_segment)
            {
                process_ephemeral_boundaries (plug_start, active_new_gen_number,
                                              active_old_gen_number,
                                              consing_gen,
                                              allocate_in_condemned);
            }

            dprintf (3, ("adding %Id to gen%d surv", ps, active_old_gen_number));

            dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number);
            dd_survived_size (dd_active_old) += ps;
            BOOL convert_to_pinned_p = FALSE;

            if (!pinned_plug_p)
            {
#if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
                dd_num_npinned_plugs (dd_active_old)++;
#endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN

                add_gen_plug (active_old_gen_number, ps);

                if (allocate_in_condemned)
                {
                    verify_pins_with_post_plug_info("before aic");

                    new_address =
                        allocate_in_condemned_generations (consing_gen,
                                                           ps,
                                                           active_old_gen_number,
#ifdef SHORT_PLUGS
                                                           &convert_to_pinned_p,
                                                           (npin_before_pin_p ? plug_end : 0),
                                                           seg1,
#endif //SHORT_PLUGS
                                                           plug_start REQD_ALIGN_AND_OFFSET_ARG);
                    verify_pins_with_post_plug_info("after aic");
                }
                else
                {
                    new_address = allocate_in_older_generation (older_gen, ps, active_old_gen_number, plug_start REQD_ALIGN_AND_OFFSET_ARG);

                    if (new_address != 0)
                    {
                        if (settings.condemned_generation == (max_generation - 1))
                        {
                            dprintf (3, (" NA: %Ix-%Ix -> %Ix, %Ix (%Ix)",
                                         plug_start, plug_end,
                                         (size_t)new_address, (size_t)new_address + (plug_end - plug_start),
                                         (size_t)(plug_end - plug_start)));
                        }
                    }
                    else
                    {
                        if (generation_allocator(older_gen)->discard_if_no_fit_p())
                        {
                            allocate_in_condemned = TRUE;
                        }

                        new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number,
#ifdef SHORT_PLUGS
                                                                         &convert_to_pinned_p,
                                                                         (npin_before_pin_p ? plug_end : 0),
                                                                         seg1,
#endif //SHORT_PLUGS
                                                                         plug_start REQD_ALIGN_AND_OFFSET_ARG);
                    }
                }
                if (convert_to_pinned_p)
                {
                    assert (last_npinned_plug_p != FALSE);
                    assert (last_pinned_plug_p == FALSE);
                    convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
                                            ps, artificial_pinned_size);
                    enque_pinned_plug (plug_start, FALSE, 0);
                    last_pinned_plug = plug_start;
                }
                else
                {
                    if (!new_address)
                    {
                        //verify that we are at the end of the ephemeral segment
                        assert (generation_allocation_segment (consing_gen) ==
                                ephemeral_heap_segment);
                        //verify that we are near the end
                        assert ((generation_allocation_pointer (consing_gen) + Align (ps)) <
                                heap_segment_allocated (ephemeral_heap_segment));
                        assert ((generation_allocation_pointer (consing_gen) + Align (ps)) >
                                (heap_segment_allocated (ephemeral_heap_segment) - Align (min_obj_size)));
                    }

#ifdef SIMPLE_DPRINTF
                    dprintf (3, ("(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d)",
                                 (size_t)(node_gap_size (plug_start)),
                                 plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address),
                                 (size_t)new_address + ps, ps,
                                 (is_plug_padded (plug_start) ? 1 : 0)));
#endif //SIMPLE_DPRINTF

#ifdef SHORT_PLUGS
                    if (is_plug_padded (plug_start))
                    {
                        dprintf (3, ("%Ix was padded", plug_start));
                        dd_padding_size (dd_active_old) += Align (min_obj_size);
                    }
#endif //SHORT_PLUGS
                }
            }
            else
            {
                if (fire_pinned_plug_events_p)
                {
                    FIRE_EVENT(PinPlugAtGCTime, plug_start, plug_end,
                               (merge_with_last_pin_p ? 0 : (uint8_t*)node_gap_size (plug_start)));
                }

                if (merge_with_last_pin_p)
                {
                    merge_with_last_pinned_plug (last_pinned_plug, ps);
                }
                else
                {
                    assert (last_pinned_plug == plug_start);
                    set_pinned_info (plug_start, ps, consing_gen);
                }
                new_address = plug_start;

                dprintf (3, ( "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)",
                              (size_t)(node_gap_size (plug_start)), (size_t)plug_start,
                              (size_t)plug_end, ps,
                              (merge_with_last_pin_p ? 1 : 0)));

                dprintf (3, ("adding %Id to gen%d pinned surv", plug_end - plug_start, active_old_gen_number));
                dd_pinned_survived_size (dd_active_old) += plug_end - plug_start;
                dd_added_pinned_size (dd_active_old) += added_pinning_size;
                dd_artificial_pinned_survived_size (dd_active_old) += artificial_pinned_size;

                if (!demote_gen1_p && (active_old_gen_number == (max_generation - 1)))
                {
                    last_gen1_pin_end = plug_end;
                }
            }

            // detect forward allocation in the same segment
            assert (!((new_address > plug_start) &&
                      (new_address < heap_segment_reserved (seg1))));
            if (!merge_with_last_pin_p)
            {
                if (current_brick != brick_of (plug_start))
                {
                    current_brick = update_brick_table (tree, current_brick, plug_start, saved_plug_end);
                    sequence_number = 0;
                    tree = 0;
                }

                set_node_relocation_distance (plug_start, (new_address - plug_start));
                if (last_node && (node_relocation_distance (last_node) ==
                                  (node_relocation_distance (plug_start) +
                                   (ptrdiff_t)node_gap_size (plug_start))))
                {
                    //dprintf(3,( " Lb"));
                    dprintf (3, ("%Ix Lb", plug_start));
                    set_node_left (plug_start);
                }
                if (0 == sequence_number)
                {
                    dprintf (2, ("sn: 0, tree is set to %Ix", plug_start));
                    tree = plug_start;
                }

                verify_pins_with_post_plug_info("before insert node");

                tree = insert_node (plug_start, ++sequence_number, tree, last_node);
                dprintf (3, ("tree is %Ix (b: %Ix) after insert_node", tree, brick_of (tree)));
                last_node = plug_start;

#ifdef _DEBUG
                // If the plug right before us was a pinned plug, save its post-plug
                // gap info so we can verify it later.
                if (!pinned_plug_p)
                {
                    if (mark_stack_tos > 0)
                    {
                        mark& m = mark_stack_array[mark_stack_tos - 1];
                        if (m.has_post_plug_info())
                        {
                            uint8_t* post_plug_info_start = m.saved_post_plug_info_start;
                            size_t* current_plug_gap_start = (size_t*)(plug_start - sizeof (plug_and_gap));
                            if ((uint8_t*)current_plug_gap_start == post_plug_info_start)
                            {
                                dprintf (3, ("Ginfo: %Ix, %Ix, %Ix",
                                             *current_plug_gap_start, *(current_plug_gap_start + 1),
                                             *(current_plug_gap_start + 2)));
                                memcpy (&(m.saved_post_plug_debug), current_plug_gap_start, sizeof (gap_reloc_pair));
                            }
                        }
                    }
                }
#endif //_DEBUG

                verify_pins_with_post_plug_info("after insert node");
            }
            if (num_pinned_plugs_in_plug > 1)
            {
                dprintf (3, ("more than %Id pinned plugs in this plug", num_pinned_plugs_in_plug));
            }

            if (use_mark_list)
            {
                while ((mark_list_next < mark_list_index) &&
                       (*mark_list_next <= x))
                {
                    mark_list_next++;
                }
                if ((mark_list_next < mark_list_index)
#ifdef MULTIPLE_HEAPS
                    && (*mark_list_next < end) //for multiple segments
#endif //MULTIPLE_HEAPS
                    )
                    x = *mark_list_next;
                else
                    x = end;
            }
            else
            {
                uint8_t* xl = x;
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    assert (recursive_gc_sync::background_running_p());
                    while ((xl < end) && !marked (xl))
                    {
                        dprintf (4, ("-%Ix-", (size_t)xl));
                        assert ((size (xl) > 0));
                        background_object_marked (xl, TRUE);
                        xl = xl + Align (size (xl));
                        Prefetch (xl);
                    }
                }
                else
#endif //BACKGROUND_GC
                {
                    while ((xl < end) && !marked (xl))
                    {
                        dprintf (4, ("-%Ix-", (size_t)xl));
                        assert ((size (xl) > 0));
                        xl = xl + Align (size (xl));
                        Prefetch (xl);
                    }
                }

                assert (xl <= end);
                x = xl;
            }
        }
    }
    while (!pinned_plug_que_empty_p())
    {
        if (settings.promotion)
        {
            uint8_t*  pplug = pinned_plug (oldest_pin());
            if (in_range_for_segment (pplug, ephemeral_heap_segment))
            {
                consing_gen = ensure_ephemeral_heap_segment (consing_gen);
                //allocate all of the generation gaps
                while (active_new_gen_number > 0)
                {
                    active_new_gen_number--;

                    if (active_new_gen_number == (max_generation - 1))
                    {
                        maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
                        if (!demote_gen1_p)
                            advance_pins_for_demotion (consing_gen);
                    }

                    generation* gen = generation_of (active_new_gen_number);
                    plan_generation_start (gen, consing_gen, 0);

                    if (demotion_low == MAX_PTR)
                    {
                        demotion_low = pplug;
                        dprintf (3, ("end plan: dlow->%Ix", demotion_low));
                    }

                    dprintf (2, ("(%d)gen%d plan start: %Ix",
                                 heap_number, active_new_gen_number, (size_t)generation_plan_allocation_start (gen)));
                    assert (generation_plan_allocation_start (gen));
                }
            }
        }

        if (pinned_plug_que_empty_p())
            break;

        size_t  entry = deque_pinned_plug();
        mark*  m = pinned_plug_of (entry);
        uint8_t*  plug = pinned_plug (m);
        size_t  len = pinned_len (m);

        // detect pinned block in different segment (later) than
        // allocation segment
        heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen));

        while ((plug < generation_allocation_pointer (consing_gen)) ||
               (plug >= heap_segment_allocated (nseg)))
        {
            assert ((plug < heap_segment_mem (nseg)) ||
                    (plug > heap_segment_reserved (nseg)));
            //adjust the end of the segment to be the end of the plug
            assert (generation_allocation_pointer (consing_gen)>=
                    heap_segment_mem (nseg));
            assert (generation_allocation_pointer (consing_gen)<=
                    heap_segment_committed (nseg));

            heap_segment_plan_allocated (nseg) =
                generation_allocation_pointer (consing_gen);
            //switch allocation segment
            nseg = heap_segment_next_rw (nseg);
            generation_allocation_segment (consing_gen) = nseg;
            //reset the allocation pointer and limits
            generation_allocation_pointer (consing_gen) =
                heap_segment_mem (nseg);
        }

        set_new_pin_info (m, generation_allocation_pointer (consing_gen));
        dprintf (2, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug),
                     (size_t)(brick_table[brick_of (plug)])));

        generation_allocation_pointer (consing_gen) = plug + len;
        generation_allocation_limit (consing_gen) =
            generation_allocation_pointer (consing_gen);
        //Add the size of the pinned plug to the right pinned allocations
        //find out which gen this pinned plug came from
        int frgn = object_gennum (plug);
        if ((frgn != (int)max_generation) && settings.promotion)
        {
            generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
        }
    }

    plan_generation_starts (consing_gen);
    print_free_and_plug ("AP");
#ifdef SIMPLE_DPRINTF
    for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
    {
        generation* temp_gen = generation_of (gen_idx);
        dynamic_data* temp_dd = dynamic_data_of (gen_idx);

        int added_pinning_ratio = 0;
        int artificial_pinned_ratio = 0;

        if (dd_pinned_survived_size (temp_dd) != 0)
        {
            added_pinning_ratio = (int)((float)dd_added_pinned_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
            artificial_pinned_ratio = (int)((float)dd_artificial_pinned_survived_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
        }

        size_t padding_size =
#ifdef SHORT_PLUGS
            dd_padding_size (temp_dd);
#else
            0;
#endif //SHORT_PLUGS
        dprintf (1, ("gen%d: %Ix, %Ix(%Id), NON PIN alloc: %Id, pin com: %Id, sweep: %Id, surv: %Id, pinsurv: %Id(%d%% added, %d%% art), np surv: %Id, pad: %Id",
                     gen_idx,
                     generation_allocation_start (temp_gen),
                     generation_plan_allocation_start (temp_gen),
                     (size_t)(generation_plan_allocation_start (temp_gen) - generation_allocation_start (temp_gen)),
                     generation_allocation_size (temp_gen),
                     generation_pinned_allocation_compact_size (temp_gen),
                     generation_pinned_allocation_sweep_size (temp_gen),
                     dd_survived_size (temp_dd),
                     dd_pinned_survived_size (temp_dd),
                     added_pinning_ratio,
                     artificial_pinned_ratio,
                     (dd_survived_size (temp_dd) - dd_pinned_survived_size (temp_dd)),
                     padding_size));
    }
#endif //SIMPLE_DPRINTF
    if (settings.condemned_generation == (max_generation - 1 ))
    {
        size_t plan_gen2_size = generation_plan_size (max_generation);
        size_t growth = plan_gen2_size - old_gen2_size;

        generation* older_gen = generation_of (settings.condemned_generation + 1);
        size_t rejected_free_space = generation_free_obj_space (older_gen) - r_free_obj_space;
        size_t free_list_allocated = generation_free_list_allocated (older_gen) - r_older_gen_free_list_allocated;
        size_t end_seg_allocated = generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated;
        size_t condemned_allocated = generation_condemned_allocated (older_gen) - r_older_gen_condemned_allocated;

        if (growth > 0)
        {
            dprintf (1, ("gen2 grew %Id (end seg alloc: %Id, condemned alloc: %Id",
                         growth, end_seg_allocated, condemned_allocated));
            maxgen_size_inc_p = true;
        }
        else
        {
            dprintf (2, ("gen2 shrank %Id (end seg alloc: %Id, , condemned alloc: %Id, gen1 c alloc: %Id",
                         (old_gen2_size - plan_gen2_size), end_seg_allocated, condemned_allocated,
                         generation_condemned_allocated (generation_of (max_generation - 1))));
        }

        dprintf (1, ("older gen's free alloc: %Id->%Id, seg alloc: %Id->%Id, condemned alloc: %Id->%Id",
                     r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen),
                     r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen),
                     r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen)));

        dprintf (1, ("this GC did %Id free list alloc(%Id bytes free space rejected)",
                     free_list_allocated, rejected_free_space));

        maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info);
        maxgen_size_info->free_list_allocated = free_list_allocated;
        maxgen_size_info->free_list_rejected = rejected_free_space;
        maxgen_size_info->end_seg_allocated = end_seg_allocated;
        maxgen_size_info->condemned_allocated = condemned_allocated;
        maxgen_size_info->pinned_allocated = maxgen_pinned_compact_before_advance;
        maxgen_size_info->pinned_allocated_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)) - maxgen_pinned_compact_before_advance;

#ifdef FREE_USAGE_STATS
        int free_list_efficiency = 0;
        if ((free_list_allocated + rejected_free_space) != 0)
            free_list_efficiency = (int)(((float) (free_list_allocated) / (float)(free_list_allocated + rejected_free_space)) * (float)100);

        int running_free_list_efficiency = (int)(generation_allocator_efficiency(older_gen)*100);

        dprintf (1, ("gen%d free list alloc effi: %d%%, current effi: %d%%",
                     older_gen->gen_num,
                     free_list_efficiency, running_free_list_efficiency));

        dprintf (1, ("gen2 free list change"));
        for (int j = 0; j < NUM_GEN_POWER2; j++)
        {
            dprintf (1, ("[h%d][#%Id]: 2^%d: F: %Id->%Id(%Id), P: %Id",
                         heap_number,
                         settings.gc_index,
                         (j + 10), r_older_gen_free_space[j], older_gen->gen_free_spaces[j],
                         (ptrdiff_t)(r_older_gen_free_space[j] - older_gen->gen_free_spaces[j]),
                         (generation_of(max_generation - 1))->gen_plugs[j]));
        }
#endif //FREE_USAGE_STATS
    }
    size_t fragmentation =
        generation_fragmentation (generation_of (condemned_gen_number),
                                  consing_gen,
                                  heap_segment_allocated (ephemeral_heap_segment));

    dprintf (2,("Fragmentation: %Id", fragmentation));
    dprintf (2,("---- End of Plan phase ----"));
#ifdef TIME_GC
    finish = GetCycleCount32();
    plan_time = finish - start;
#endif //TIME_GC

    // We may update write barrier code. We assume here EE has been suspended if we are on a GC thread.
    assert(IsGCInProgress());

    BOOL should_expand = FALSE;
    BOOL should_compact = FALSE;
    ephemeral_promotion = FALSE;
#ifdef BIT64
    if ((!settings.concurrent) &&
        !provisional_mode_triggered &&
        ((condemned_gen_number < max_generation) &&
         ((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95))))
    {
        dprintf (GTC_LOG, ("gen0 reduction count is %d, condemning %d, mem load %d",
                           settings.gen0_reduction_count,
                           condemned_gen_number,
                           settings.entry_memory_load));
        should_compact = TRUE;

        get_gc_data_per_heap()->set_mechanism (gc_heap_compact,
            ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load));

        if ((condemned_gen_number >= (max_generation - 1)) &&
            dt_low_ephemeral_space_p (tuning_deciding_expansion))
        {
            dprintf (GTC_LOG, ("Not enough space for all ephemeral generations with compaction"));
            should_expand = TRUE;
        }
    }
    else
    {
#endif // BIT64
        should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand);
#ifdef BIT64
    }
#endif // BIT64
#ifdef FEATURE_LOH_COMPACTION
    loh_compacted_p = FALSE;
#endif //FEATURE_LOH_COMPACTION

    if (condemned_gen_number == max_generation)
    {
#ifdef FEATURE_LOH_COMPACTION
        if (settings.loh_compaction)
        {
            if (plan_loh())
            {
                should_compact = TRUE;
                get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
                loh_compacted_p = TRUE;
            }
        }
        else
        {
            if ((heap_number == 0) && (loh_pinned_queue))
            {
                loh_pinned_queue_decay--;

                if (!loh_pinned_queue_decay)
                {
                    delete loh_pinned_queue;
                    loh_pinned_queue = 0;
                }
            }
        }

        if (!loh_compacted_p)
#endif //FEATURE_LOH_COMPACTION
        {
            GCToEEInterface::DiagWalkLOHSurvivors(__this);
            sweep_large_objects();
        }
    }
    else
    {
        settings.loh_compaction = FALSE;
    }
#ifdef MULTIPLE_HEAPS

    new_heap_segment = NULL;

    if (should_compact && should_expand)
        gc_policy = policy_expand;
    else if (should_compact)
        gc_policy = policy_compact;
    else
        gc_policy = policy_sweep;

    //vote for result of should_compact
    dprintf (3, ("Joining for compaction decision"));
    gc_t_join.join(this, gc_join_decide_on_compaction);
    if (gc_t_join.joined())
    {
        //safe place to delete large heap segments
        if (condemned_gen_number == max_generation)
        {
            for (int i = 0; i < n_heaps; i++)
            {
                g_heaps [i]->rearrange_large_heap_segments ();
            }
        }

        if (maxgen_size_inc_p && provisional_mode_triggered)
        {
            pm_trigger_full_gc = true;
            dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2"));
        }
        else
        {
            settings.demotion = FALSE;
            int pol_max = policy_sweep;
#ifdef GC_CONFIG_DRIVEN
            BOOL is_compaction_mandatory = FALSE;
#endif //GC_CONFIG_DRIVEN

            int i;
            for (i = 0; i < n_heaps; i++)
            {
                if (pol_max < g_heaps[i]->gc_policy)
                    pol_max = policy_compact;
                // set the demotion flag if any of the heaps has demotion
                if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low)
                {
                    (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit);
                    settings.demotion = TRUE;
                }

#ifdef GC_CONFIG_DRIVEN
                if (!is_compaction_mandatory)
                {
                    int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact);
                    if (compact_reason >= 0)
                    {
                        if (gc_heap_compact_reason_mandatory_p[compact_reason])
                            is_compaction_mandatory = TRUE;
                    }
                }
#endif //GC_CONFIG_DRIVEN
            }

#ifdef GC_CONFIG_DRIVEN
            if (!is_compaction_mandatory)
            {
                // If compaction is not mandatory we can feel free to change it to a sweeping GC.
                // Note that we may want to change this to only checking every so often instead of every single GC.
                if (should_do_sweeping_gc (pol_max >= policy_compact))
                {
                    pol_max = policy_sweep;
                }
                else
                {
                    if (pol_max == policy_sweep)
                        pol_max = policy_compact;
                }
            }
#endif //GC_CONFIG_DRIVEN

            for (i = 0; i < n_heaps; i++)
            {
                if (pol_max > g_heaps[i]->gc_policy)
                    g_heaps[i]->gc_policy = pol_max;
                //get the segment while we are serialized
                if (g_heaps[i]->gc_policy == policy_expand)
                {
                    g_heaps[i]->new_heap_segment =
                        g_heaps[i]->soh_get_segment_to_expand();
                    if (!g_heaps[i]->new_heap_segment)
                    {
                        set_expand_in_full_gc (condemned_gen_number);
                        //we are out of memory, cancel the expansion
                        g_heaps[i]->gc_policy = policy_compact;
                    }
                }
            }

            BOOL is_full_compacting_gc = FALSE;

            if ((gc_policy >= policy_compact) && (condemned_gen_number == max_generation))
            {
                full_gc_counts[gc_type_compacting]++;
                is_full_compacting_gc = TRUE;
            }

            for (i = 0; i < n_heaps; i++)
            {
                //copy the card and brick tables
                if (g_gc_card_table != g_heaps[i]->card_table)
                {
                    g_heaps[i]->copy_brick_card_table();
                }

                if (is_full_compacting_gc)
                {
                    g_heaps[i]->loh_alloc_since_cg = 0;
                }
            }
        }

        //start all threads on the roots.
        dprintf(3, ("Starting all gc threads after compaction decision"));
        gc_t_join.restart();
    }

    //reset the local variable accordingly
    should_compact = (gc_policy >= policy_compact);
    should_expand = (gc_policy >= policy_expand);
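
    // Note (added): gc_policy values are ordered (policy_sweep <
    // policy_compact < policy_expand), so the join above effectively takes
    // the maximum vote across heaps (if any heap needs to compact, all of
    // them do), and expansion segments are acquired while the threads are
    // still serialized.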
#else //MULTIPLE_HEAPS

    //safe place to delete large heap segments
    if (condemned_gen_number == max_generation)
    {
        rearrange_large_heap_segments ();
    }

    if (maxgen_size_inc_p && provisional_mode_triggered)
    {
        pm_trigger_full_gc = true;
        dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2"));
    }
    else
    {
        settings.demotion = ((demotion_high >= demotion_low) ? TRUE : FALSE);
        if (settings.demotion)
            get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);

#ifdef GC_CONFIG_DRIVEN
        BOOL is_compaction_mandatory = FALSE;
        int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
        if (compact_reason >= 0)
            is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason];

        if (!is_compaction_mandatory)
        {
            if (should_do_sweeping_gc (should_compact))
                should_compact = FALSE;
            else
                should_compact = TRUE;
        }
#endif //GC_CONFIG_DRIVEN

        if (should_compact && (condemned_gen_number == max_generation))
        {
            full_gc_counts[gc_type_compacting]++;
            loh_alloc_since_cg = 0;
        }
    }
#endif //MULTIPLE_HEAPS
    if (!pm_trigger_full_gc && pm_stress_on && provisional_mode_triggered)
    {
        if ((settings.condemned_generation == (max_generation - 1)) &&
            ((settings.gc_index % 5) == 0))
        {
            pm_trigger_full_gc = true;
        }
    }

    if (settings.condemned_generation == (max_generation - 1))
    {
        if (provisional_mode_triggered)
        {
            if (should_expand)
            {
                should_expand = FALSE;
                dprintf (GTC_LOG, ("h%d in PM cannot expand", heap_number));
            }
        }

        if (pm_trigger_full_gc)
        {
            should_compact = FALSE;
            dprintf (GTC_LOG, ("h%d PM doing sweeping", heap_number));
        }
    }
    if (should_compact)
    {
        dprintf (2,( "**** Doing Compacting GC ****"));

        if (should_expand)
        {
#ifndef MULTIPLE_HEAPS
            heap_segment* new_heap_segment = soh_get_segment_to_expand();
#endif //!MULTIPLE_HEAPS
            if (new_heap_segment)
            {
                consing_gen = expand_heap(condemned_gen_number,
                                          consing_gen,
                                          new_heap_segment);
            }

            // If we couldn't get a new segment, or we were able to
            // reserve one but had no space to commit, we couldn't
            // expand the heap.
            if (ephemeral_heap_segment != new_heap_segment)
            {
                set_expand_in_full_gc (condemned_gen_number);
                should_expand = FALSE;
            }
        }
        generation_allocation_limit (condemned_gen1) =
            generation_allocation_pointer (condemned_gen1);
        if ((condemned_gen_number < max_generation))
        {
            generation_allocator (older_gen)->commit_alloc_list_changes();

            // Fix the allocation area of the older generation
            fix_older_allocation_area (older_gen);
        }
        assert (generation_allocation_segment (consing_gen) ==
                ephemeral_heap_segment);

        GCToEEInterface::DiagWalkSurvivors(__this);

        relocate_phase (condemned_gen_number, first_condemned_address);
        compact_phase (condemned_gen_number, first_condemned_address,
                       (!settings.demotion && settings.promotion));
        fix_generation_bounds (condemned_gen_number, consing_gen);
        assert (generation_allocation_limit (youngest_generation) ==
                generation_allocation_pointer (youngest_generation));
        if (condemned_gen_number >= (max_generation -1))
        {
#ifdef MULTIPLE_HEAPS
            // this needs be serialized just because we have one
            // segment_standby_list/seg_table for all heaps. We should make it at least
            // so that when hoarding is not on we don't need this join because
            // decommitting memory can take a long time.
            //must serialize on deleting segments
            gc_t_join.join(this, gc_join_rearrange_segs_compaction);
            if (gc_t_join.joined())
            {
                for (int i = 0; i < n_heaps; i++)
                {
                    g_heaps[i]->rearrange_heap_segments(TRUE);
                }
                gc_t_join.restart();
            }
#else
            rearrange_heap_segments(TRUE);
#endif //MULTIPLE_HEAPS

            if (should_expand)
            {
                //fix the start_segment for the ephemeral generations
                for (int i = 0; i < max_generation; i++)
                {
                    generation* gen = generation_of (i);
                    generation_start_segment (gen) = ephemeral_heap_segment;
                    generation_allocation_segment (gen) = ephemeral_heap_segment;
                }
            }
        }
#ifdef FEATURE_PREMORTEM_FINALIZATION
        finalize_queue->UpdatePromotedGenerations (condemned_gen_number,
                                                   (!settings.demotion && settings.promotion));
#endif // FEATURE_PREMORTEM_FINALIZATION

#ifdef MULTIPLE_HEAPS
        dprintf(3, ("Joining after end of compaction"));
        gc_t_join.join(this, gc_join_adjust_handle_age_compact);
        if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
#ifdef MULTIPLE_HEAPS
            //join all threads to make sure they are synchronized
            dprintf(3, ("Restarting after Promotion granted"));
            gc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }
        ScanContext sc;
        sc.thread_number = heap_number;
        sc.promotion = FALSE;
        sc.concurrent = FALSE;
        // the new generation bounds are set, so we can call this now
        if (settings.promotion && !settings.demotion)
        {
            dprintf (2, ("Promoting EE roots for gen %d",
                         condemned_gen_number));
            GCScan::GcPromotionsGranted(condemned_gen_number,
                                        max_generation, &sc);
        }
        else if (settings.demotion)
        {
            dprintf (2, ("Demoting EE roots for gen %d",
                         condemned_gen_number));
            GCScan::GcDemote (condemned_gen_number, max_generation, &sc);
        }
23158 gen0_big_free_spaces = 0;
23160 reset_pinned_queue_bos();
23161 unsigned int gen_number = min (max_generation, 1 + condemned_gen_number);
23162 generation* gen = generation_of (gen_number);
23163 uint8_t* low = generation_allocation_start (generation_of (gen_number-1));
23164 uint8_t* high = heap_segment_allocated (ephemeral_heap_segment);
23166 while (!pinned_plug_que_empty_p())
23168 mark* m = pinned_plug_of (deque_pinned_plug());
23169 size_t len = pinned_len (m);
23170 uint8_t* arr = (pinned_plug (m) - len);
23171 dprintf(3,("free [%Ix %Ix[ pin",
23172 (size_t)arr, (size_t)arr + len));
23175 assert (len >= Align (min_obj_size));
23176 make_unused_array (arr, len);
23177 // fix fully contained bricks + first one
23178 // if the array goes beyond the first brick
23179 size_t start_brick = brick_of (arr);
23180 size_t end_brick = brick_of (arr + len);
23181 if (end_brick != start_brick)
23184 ("Fixing bricks [%Ix, %Ix[ to point to unused array %Ix",
23185 start_brick, end_brick, (size_t)arr));
23186 set_brick (start_brick,
23187 arr - brick_address (start_brick));
23188 size_t brick = start_brick+1;
23189 while (brick < end_brick)
23191 set_brick (brick, start_brick - brick);
// When we take an old segment to make the new ephemeral segment, we can
// have a bunch of pinned plugs out of order going to the new ephemeral seg,
// and then the next plugs go back to max_generation.
23200 if ((heap_segment_mem (ephemeral_heap_segment) <= arr) &&
23201 (heap_segment_reserved (ephemeral_heap_segment) > arr))
23204 while ((low <= arr) && (high > arr))
23207 assert ((gen_number >= 1) || (demotion_low != MAX_PTR) ||
23208 settings.demotion || !settings.promotion);
23209 dprintf (3, ("new free list generation %d", gen_number));
23211 gen = generation_of (gen_number);
23212 if (gen_number >= 1)
23213 low = generation_allocation_start (generation_of (gen_number-1));
23220 dprintf (3, ("new free list generation %d", max_generation));
23221 gen_number = max_generation;
23222 gen = generation_of (gen_number);
23225 dprintf(3,("threading it into generation %d", gen_number));
23226 thread_gap (arr, len, gen);
23227 add_gen_free (gen_number, len);
23233 for (int x = 0; x <= max_generation; x++)
23235 assert (generation_allocation_start (generation_of (x)));
23239 if (!settings.demotion && settings.promotion)
23241 //clear card for generation 1. generation 0 is empty
23242 clear_card_for_addresses (
23243 generation_allocation_start (generation_of (1)),
23244 generation_allocation_start (generation_of (0)));
23246 if (settings.promotion && !settings.demotion)
23248 uint8_t* start = generation_allocation_start (youngest_generation);
23249 MAYBE_UNUSED_VAR(start);
23250 assert (heap_segment_allocated (ephemeral_heap_segment) ==
23251 (start + Align (size (start))));
23256 //force promotion for sweep
23257 settings.promotion = TRUE;
23258 settings.compaction = FALSE;
23261 sc.thread_number = heap_number;
23262 sc.promotion = FALSE;
23263 sc.concurrent = FALSE;
23265 dprintf (2, ("**** Doing Mark and Sweep GC****"));
23267 if ((condemned_gen_number < max_generation))
23269 generation_allocator (older_gen)->copy_from_alloc_list (r_free_list);
23270 generation_free_list_space (older_gen) = r_free_list_space;
23271 generation_free_obj_space (older_gen) = r_free_obj_space;
23272 generation_free_list_allocated (older_gen) = r_older_gen_free_list_allocated;
23273 generation_end_seg_allocated (older_gen) = r_older_gen_end_seg_allocated;
23274 generation_condemned_allocated (older_gen) = r_older_gen_condemned_allocated;
23275 generation_allocation_limit (older_gen) = r_allocation_limit;
23276 generation_allocation_pointer (older_gen) = r_allocation_pointer;
23277 generation_allocation_context_start_region (older_gen) = r_allocation_start_region;
23278 generation_allocation_segment (older_gen) = r_allocation_segment;
23281 if ((condemned_gen_number < max_generation))
23283 // Fix the allocation area of the older generation
23284 fix_older_allocation_area (older_gen);
23287 GCToEEInterface::DiagWalkSurvivors(__this);
23289 gen0_big_free_spaces = 0;
23290 make_free_lists (condemned_gen_number);
23291 recover_saved_pinned_info();
23293 #ifdef FEATURE_PREMORTEM_FINALIZATION
23294 finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
23295 #endif // FEATURE_PREMORTEM_FINALIZATION
23296 // MTHTS: leave single thread for HT processing on plan_phase
23297 #ifdef MULTIPLE_HEAPS
23298 dprintf(3, ("Joining after end of sweep"));
23299 gc_t_join.join(this, gc_join_adjust_handle_age_sweep);
23300 if (gc_t_join.joined())
23301 #endif //MULTIPLE_HEAPS
23303 GCScan::GcPromotionsGranted(condemned_gen_number,
23304 max_generation, &sc);
23305 if (condemned_gen_number >= (max_generation -1))
23307 #ifdef MULTIPLE_HEAPS
23308 for (int i = 0; i < n_heaps; i++)
23310 g_heaps[i]->rearrange_heap_segments(FALSE);
23313 rearrange_heap_segments(FALSE);
23314 #endif //MULTIPLE_HEAPS
23317 #ifdef MULTIPLE_HEAPS
23318 //join all threads to make sure they are synchronized
23319 dprintf(3, ("Restarting after Promotion granted"));
23320 gc_t_join.restart();
23321 #endif //MULTIPLE_HEAPS
23325 for (int x = 0; x <= max_generation; x++)
23327 assert (generation_allocation_start (generation_of (x)));
23331 //clear card for generation 1. generation 0 is empty
23332 clear_card_for_addresses (
23333 generation_allocation_start (generation_of (1)),
23334 generation_allocation_start (generation_of (0)));
23335 assert ((heap_segment_allocated (ephemeral_heap_segment) ==
23336 (generation_allocation_start (youngest_generation) +
23337 Align (min_obj_size))));
23340 //verify_partial();
23343 #pragma warning(pop)
23347 /*****************************
23348 Called after compact phase to fix all generation gaps
23349 ********************************/
23350 void gc_heap::fix_generation_bounds (int condemned_gen_number,
23351 generation* consing_gen)
23353 UNREFERENCED_PARAMETER(consing_gen);
23355 assert (generation_allocation_segment (consing_gen) ==
23356 ephemeral_heap_segment);
23358 //assign the planned allocation start to the generation
23359 int gen_number = condemned_gen_number;
23360 int bottom_gen = 0;
23362 while (gen_number >= bottom_gen)
23364 generation* gen = generation_of (gen_number);
23365 dprintf(3,("Fixing generation pointers for %Ix", gen_number));
23366 if ((gen_number < max_generation) && ephemeral_promotion)
23368 make_unused_array (saved_ephemeral_plan_start[gen_number],
23369 saved_ephemeral_plan_start_size[gen_number]);
23371 reset_allocation_pointers (gen, generation_plan_allocation_start (gen));
23372 make_unused_array (generation_allocation_start (gen), generation_plan_allocation_start_size (gen));
23373 dprintf(3,(" start %Ix", (size_t)generation_allocation_start (gen)));
23376 #ifdef MULTIPLE_HEAPS
23377 if (ephemeral_promotion)
// We are creating a generation fault; set the cards.
// We only do this for multiple heaps because in the single-heap scenario
// the new ephemeral generations will be empty and there will be no need to
// set cards for the old ephemeral generations that got promoted into
// max_generation.
23383 ptrdiff_t delta = 0;
23384 #ifdef SEG_MAPPING_TABLE
23385 heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]);
23386 #else //SEG_MAPPING_TABLE
23387 heap_segment* old_ephemeral_seg = segment_of (saved_ephemeral_plan_start[max_generation-1], delta);
23388 #endif //SEG_MAPPING_TABLE
23390 assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg));
23391 size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg)));
23392 size_t card = card_of (saved_ephemeral_plan_start[max_generation-1]);
23393 while (card != end_card)
23399 #endif //MULTIPLE_HEAPS
23401 alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment);
23402 //reset the allocated size
23403 uint8_t* start = generation_allocation_start (youngest_generation);
23404 MAYBE_UNUSED_VAR(start);
23405 if (settings.promotion && !settings.demotion)
23407 assert ((start + Align (size (start))) ==
23408 heap_segment_plan_allocated(ephemeral_heap_segment));
23411 heap_segment_allocated(ephemeral_heap_segment)=
23412 heap_segment_plan_allocated(ephemeral_heap_segment);
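// In effect, for each generation in the condemned range the loop above makes
// generation_allocation_start (gen) equal to the start planned during the
// plan phase, and turns the bytes that held the old start object into a free
// object so the heap stays walkable. A condensed view (illustrative only):
//
//   before: [old gen start obj][ ... survivors ... ]
//   after:  [free obj         ][ ... survivors ... ]
//                               ^ generation_allocation_start (gen)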
23416 uint8_t* gc_heap::generation_limit (int gen_number)
23418 if (settings.promotion)
23420 if (gen_number <= 1)
23421 return heap_segment_reserved (ephemeral_heap_segment);
23423 return generation_allocation_start (generation_of ((gen_number - 2)));
23427 if (gen_number <= 0)
23428 return heap_segment_reserved (ephemeral_heap_segment);
23430 return generation_allocation_start (generation_of ((gen_number - 1)));
23434 BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number)
23436 uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
23437 size_t size = Align (min_obj_size)*(condemned_gen_number+1);
23438 assert ((start + size) <=
23439 heap_segment_reserved (ephemeral_heap_segment));
23440 if ((start + size) >
23441 heap_segment_committed (ephemeral_heap_segment))
23443 if (!grow_heap_segment (ephemeral_heap_segment, start + size))
23451 uint8_t* gc_heap::allocate_at_end (size_t size)
23453 uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
23454 size = Align (size);
23455 uint8_t* result = start;
23456 // only called to allocate a min obj so can't overflow here.
23457 assert ((start + size) <=
23458 heap_segment_reserved (ephemeral_heap_segment));
23459 //ensure_gap_allocation took care of it
23460 assert ((start + size) <=
23461 heap_segment_committed (ephemeral_heap_segment));
23462 heap_segment_allocated (ephemeral_heap_segment) += size;
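// A note on the pairing above (our reading of the asserts): ensure_gap_allocation
// commits room for Align (min_obj_size) * (condemned_gen_number + 1) bytes,
// i.e. one minimal object per generation start that the sweep will re-create,
// which is why allocate_at_end can assert on committed space instead of
// checking for it.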
23467 void gc_heap::make_free_lists (int condemned_gen_number)
23472 start = GetCycleCount32();
23475 //Promotion has to happen in sweep case.
23476 assert (settings.promotion);
23478 generation* condemned_gen = generation_of (condemned_gen_number);
23479 uint8_t* start_address = generation_allocation_start (condemned_gen);
23481 size_t current_brick = brick_of (start_address);
23482 heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
23484 PREFIX_ASSUME(current_heap_segment != NULL);
23486 uint8_t* end_address = heap_segment_allocated (current_heap_segment);
23487 size_t end_brick = brick_of (end_address-1);
23488 make_free_args args;
23489 args.free_list_gen_number = min (max_generation, 1 + condemned_gen_number);
23490 args.current_gen_limit = (((condemned_gen_number == max_generation)) ?
23492 (generation_limit (args.free_list_gen_number)));
23493 args.free_list_gen = generation_of (args.free_list_gen_number);
23494 args.highest_plug = 0;
23496 if ((start_address < end_address) ||
23497 (condemned_gen_number == max_generation))
23501 if ((current_brick > end_brick))
23503 if (args.current_gen_limit == MAX_PTR)
23505 //We had an empty segment
23506 //need to allocate the generation start
23508 generation* gen = generation_of (max_generation);
23510 heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
23512 PREFIX_ASSUME(start_seg != NULL);
23514 uint8_t* gap = heap_segment_mem (start_seg);
23516 generation_allocation_start (gen) = gap;
23517 heap_segment_allocated (start_seg) = gap + Align (min_obj_size);
23518 make_unused_array (gap, Align (min_obj_size));
23519 reset_allocation_pointers (gen, gap);
23520 dprintf (3, ("Start segment empty, fixing generation start of %d to: %Ix",
23521 max_generation, (size_t)gap));
23522 args.current_gen_limit = generation_limit (args.free_list_gen_number);
23524 if (heap_segment_next_rw (current_heap_segment))
23526 current_heap_segment = heap_segment_next_rw (current_heap_segment);
23527 current_brick = brick_of (heap_segment_mem (current_heap_segment));
23528 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
23538 int brick_entry = brick_table [ current_brick ];
23539 if ((brick_entry >= 0))
23541 make_free_list_in_brick (brick_address (current_brick) + brick_entry-1, &args);
23542 dprintf(3,("Fixing brick entry %Ix to %Ix",
23543 current_brick, (size_t)args.highest_plug));
23544 set_brick (current_brick,
23545 (args.highest_plug - brick_address (current_brick)));
23549 if ((brick_entry > -32768))
23553 ptrdiff_t offset = brick_of (args.highest_plug) - current_brick;
23554 if ((brick_entry != -32767) && (! ((offset == brick_entry))))
23556 assert ((brick_entry == -1));
23559 //init to -1 for faster find_first_object
23560 set_brick (current_brick, -1);
23568 int bottom_gen = 0;
23569 args.free_list_gen_number--;
23570 while (args.free_list_gen_number >= bottom_gen)
23573 generation* gen2 = generation_of (args.free_list_gen_number);
23574 gap = allocate_at_end (Align(min_obj_size));
23575 generation_allocation_start (gen2) = gap;
23576 reset_allocation_pointers (gen2, gap);
23577 dprintf(3,("Fixing generation start of %d to: %Ix",
23578 args.free_list_gen_number, (size_t)gap));
23579 PREFIX_ASSUME(gap != NULL);
23580 make_unused_array (gap, Align (min_obj_size));
23582 args.free_list_gen_number--;
23585 //reset the allocated size
23586 uint8_t* start2 = generation_allocation_start (youngest_generation);
23587 alloc_allocated = start2 + Align (size (start2));
23591 finish = GetCycleCount32();
23592 sweep_time = finish - start;
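// A minimal sketch of the brick-table convention the fix-ups above rely on
// (and that relocate_address walks below): a non-negative entry encodes
// where the highest plug tree of the brick starts, read back as
// brick_address (brick) + entry - 1, while a negative entry is a relative
// hop back toward the brick holding the covering plug.
#if 0
// Not compiled; written as if gc_heap's members were in scope.
static uint8_t* sketch_plug_tree_of_brick (size_t brick)
{
    int entry = brick_table [brick];
    if (entry == 0)
        return 0;                      // no plug info recorded for this brick
    while (entry < 0)
    {
        brick += entry;                // relative hop, as in relocate_address
        entry = brick_table [brick];
    }
    return brick_address (brick) + entry - 1;
}
#endif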
23596 void gc_heap::make_free_list_in_brick (uint8_t* tree, make_free_args* args)
23598 assert ((tree != NULL));
23600 int right_node = node_right_child (tree);
23601 int left_node = node_left_child (tree);
23602 args->highest_plug = 0;
23605 if (! (0 == left_node))
23607 make_free_list_in_brick (tree + left_node, args);
23611 uint8_t* plug = tree;
23612 size_t gap_size = node_gap_size (tree);
23613 uint8_t* gap = (plug - gap_size);
23614 dprintf (3,("Making free list %Ix len %d in %d",
23615 //dprintf (3,("F: %Ix len %Ix in %d",
23616 (size_t)gap, gap_size, args->free_list_gen_number));
23617 args->highest_plug = tree;
23619 if (is_plug_padded (plug))
23621 dprintf (3, ("%Ix padded", plug));
23622 clear_plug_padded (plug);
23624 #endif //SHORT_PLUGS
23627 if ((args->current_gen_limit == MAX_PTR) ||
23628 ((plug >= args->current_gen_limit) &&
23629 ephemeral_pointer_p (plug)))
23631 dprintf(3,(" Crossing Generation boundary at %Ix",
23632 (size_t)args->current_gen_limit));
23633 if (!(args->current_gen_limit == MAX_PTR))
23635 args->free_list_gen_number--;
23636 args->free_list_gen = generation_of (args->free_list_gen_number);
23638 dprintf(3,( " Fixing generation start of %d to: %Ix",
23639 args->free_list_gen_number, (size_t)gap));
23641 reset_allocation_pointers (args->free_list_gen, gap);
23642 args->current_gen_limit = generation_limit (args->free_list_gen_number);
23644 if ((gap_size >= (2*Align (min_obj_size))))
23646 dprintf(3,(" Splitting the gap in two %Id left",
23648 make_unused_array (gap, Align(min_obj_size));
23649 gap_size = (gap_size - Align(min_obj_size));
23650 gap = (gap + Align(min_obj_size));
23654 make_unused_array (gap, gap_size);
23661 thread_gap (gap, gap_size, args->free_list_gen);
23662 add_gen_free (args->free_list_gen->gen_num, gap_size);
23664 if (! (0 == right_node))
23666 make_free_list_in_brick (tree + right_node, args);
23672 void gc_heap::thread_gap (uint8_t* gap_start, size_t size, generation* gen)
23674 assert (generation_allocation_start (gen));
23677 if ((gen->gen_num == 0) && (size > CLR_SIZE))
23679 gen0_big_free_spaces += size;
23682 assert ((heap_segment_rw (generation_start_segment (gen))!=
23683 ephemeral_heap_segment) ||
23684 (gap_start > generation_allocation_start (gen)));
23685 // The beginning of a segment gap is not aligned
23686 assert (size >= Align (min_obj_size));
23687 make_unused_array (gap_start, size,
23688 (!settings.concurrent && (gen != youngest_generation)),
23689 (gen->gen_num == max_generation));
23690 dprintf (3, ("fr: [%Ix, %Ix[", (size_t)gap_start, (size_t)gap_start+size));
23692 if ((size >= min_free_list))
23694 generation_free_list_space (gen) += size;
23695 generation_allocator (gen)->thread_item (gap_start, size);
23699 generation_free_obj_space (gen) += size;
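// The policy above in one line: a gap of at least min_free_list bytes is
// threaded onto the generation's allocator and counted as free_list_space;
// anything shorter stays a plain unused array and is only accounted for as
// free_obj_space, being too small to be worth allocating from.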
23704 void gc_heap::loh_thread_gap_front (uint8_t* gap_start, size_t size, generation* gen)
23706 assert (generation_allocation_start (gen));
23707 if (size >= min_free_list)
23709 generation_free_list_space (gen) += size;
23710 generation_allocator (gen)->thread_item_front (gap_start, size);
23714 void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL resetp)
23716 dprintf (3, ("Making unused array [%Ix, %Ix[",
23717 (size_t)x, (size_t)(x+size)));
23718 assert (size >= Align (min_obj_size));
23720 //#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
23721 // check_batch_mark_array_bits (x, x+size);
23722 //#endif //VERIFY_HEAP && BACKGROUND_GC
23725 reset_memory (x, size);
23727 ((CObjectHeader*)x)->SetFree(size);
23732 #error "This won't work on big endian platforms"
23735 size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;
23737 if (size_as_object < size)
// If the size is more than 4GB, we need to create multiple objects because
// Array::m_NumComponents is uint32_t and the high 32 bits of the unused
// array size are ignored in the regular object size computation.
23744 uint8_t * tmp = x + size_as_object;
23745 size_t remaining_size = size - size_as_object;
23747 while (remaining_size > UINT32_MAX)
23749 // Make sure that there will be at least Align(min_obj_size) left
23750 size_t current_size = UINT32_MAX - get_alignment_constant (FALSE)
23751 - Align (min_obj_size, get_alignment_constant (FALSE));
23753 ((CObjectHeader*)tmp)->SetFree(current_size);
23755 remaining_size -= current_size;
23756 tmp += current_size;
23759 ((CObjectHeader*)tmp)->SetFree(remaining_size);
23764 clear_card_for_addresses (x, x + Align(size));
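// A worked example of the splitting above (illustrative, 64-bit): for a 6GB
// free region, size_as_object keeps only the low 32 bits of
// (size - free_object_base_size), so the first free object covers roughly
// 2GB; the loop then carves the remaining ~4GB into chunks just under
// UINT32_MAX, each shortened by enough slack to guarantee the final piece is
// at least Align (min_obj_size).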
23767 // Clear memory set by make_unused_array.
23768 void gc_heap::clear_unused_array (uint8_t* x, size_t size)
23770 // Also clear the sync block
23771 *(((PTR_PTR)x)-1) = 0;
23773 ((CObjectHeader*)x)->UnsetFree();
23778 #error "This won't work on big endian platforms"
23781 // The memory could have been cleared in the meantime. We have to mirror the algorithm
23782 // from make_unused_array since we cannot depend on the object sizes in memory.
23783 size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;
23785 if (size_as_object < size)
23787 uint8_t * tmp = x + size_as_object;
23788 size_t remaining_size = size - size_as_object;
23790 while (remaining_size > UINT32_MAX)
23792 size_t current_size = UINT32_MAX - get_alignment_constant (FALSE)
23793 - Align (min_obj_size, get_alignment_constant (FALSE));
23795 ((CObjectHeader*)tmp)->UnsetFree();
23797 remaining_size -= current_size;
23798 tmp += current_size;
23801 ((CObjectHeader*)tmp)->UnsetFree();
23804 UNREFERENCED_PARAMETER(size);
23809 uint8_t* tree_search (uint8_t* tree, uint8_t* old_address)
23811 uint8_t* candidate = 0;
23815 if (tree < old_address)
23817 if ((cn = node_right_child (tree)) != 0)
23819 assert (candidate < tree);
23822 Prefetch (tree - 8);
23828 else if (tree > old_address)
23830 if ((cn = node_left_child (tree)) != 0)
23833 Prefetch (tree - 8);
23841 if (tree <= old_address)
23843 else if (candidate)
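// The search above returns the rightmost plug that starts at or before
// old_address: descending right remembers the current node as a candidate
// (it remains a valid answer unless a closer plug is found), descending left
// never does, and the walk ends either on an exact cover or by falling back
// to the candidate.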
23849 #ifdef FEATURE_BASICFREEZE
23850 bool gc_heap::frozen_object_p (Object* obj)
23852 #ifdef MULTIPLE_HEAPS
23853 #ifdef SEG_MAPPING_TABLE
23854 heap_segment* pSegment = seg_mapping_table_segment_of((uint8_t*)obj);
23856 ptrdiff_t delta = 0;
23857 heap_segment* pSegment = segment_of ((uint8_t*)obj, delta);
23859 #else //MULTIPLE_HEAPS
23860 heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)obj, FALSE);
23861 _ASSERTE(pSegment);
23862 #endif //MULTIPLE_HEAPS
23864 return heap_segment_read_only_p(pSegment);
23866 #endif // FEATURE_BASICFREEZE
23868 #ifdef FEATURE_REDHAWK
23869 // TODO: this was added on RH, we have not done perf runs to see if this is the right
23870 // thing to do for other versions of the CLR.
23872 #endif // FEATURE_REDHAWK
23873 void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
23875 uint8_t* old_address = *pold_address;
23876 if (!((old_address >= gc_low) && (old_address < gc_high)))
23877 #ifdef MULTIPLE_HEAPS
23879 UNREFERENCED_PARAMETER(thread);
23880 if (old_address == 0)
23882 gc_heap* hp = heap_of (old_address);
23883 if ((hp == this) ||
23884 !((old_address >= hp->gc_low) && (old_address < hp->gc_high)))
23887 #else //MULTIPLE_HEAPS
23889 #endif //MULTIPLE_HEAPS
23890 // delta translates old_address into address_gc (old_address);
23891 size_t brick = brick_of (old_address);
23892 int brick_entry = brick_table [ brick ];
23893 uint8_t* new_address = old_address;
23894 if (! ((brick_entry == 0)))
23898 while (brick_entry < 0)
23900 brick = (brick + brick_entry);
23901 brick_entry = brick_table [ brick ];
23903 uint8_t* old_loc = old_address;
23905 uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
23907 if ((node <= old_loc))
23908 new_address = (old_address + node_relocation_distance (node));
23911 if (node_left_p (node))
23913 dprintf(3,(" L: %Ix", (size_t)node));
23914 new_address = (old_address +
23915 (node_relocation_distance (node) +
23916 node_gap_size (node)));
23921 brick_entry = brick_table [ brick ];
23927 *pold_address = new_address;
23931 #ifdef FEATURE_LOH_COMPACTION
23932 if (loh_compacted_p
23933 #ifdef FEATURE_BASICFREEZE
23934 && !frozen_object_p((Object*)old_address)
23935 #endif // FEATURE_BASICFREEZE
23938 *pold_address = old_address + loh_node_relocation_distance (old_address);
23941 #endif //FEATURE_LOH_COMPACTION
23943 *pold_address = new_address;
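// A concrete reading of the lookup above (illustrative numbers): if the
// covering plug was planned to move down by 0x40 bytes, then
// node_relocation_distance (node) == -0x40 and the reference becomes
// old_address - 0x40; when node_left_p (node) holds, node_gap_size (node)
// is added on top of the relocation distance as well.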
23948 gc_heap::check_class_object_demotion (uint8_t* obj)
23950 #ifdef COLLECTIBLE_CLASS
23951 if (is_collectible(obj))
23953 check_class_object_demotion_internal (obj);
23956 UNREFERENCED_PARAMETER(obj);
23957 #endif //COLLECTIBLE_CLASS
23960 #ifdef COLLECTIBLE_CLASS
23962 gc_heap::check_class_object_demotion_internal (uint8_t* obj)
23964 if (settings.demotion)
23966 #ifdef MULTIPLE_HEAPS
// We set the card without checking the demotion range because at this point
// the handle that points to the loader allocator object may or may not have
// been relocated by other GC threads.
23970 set_card (card_of (obj));
23973 uint8_t* class_obj = get_class_object (obj);
23974 dprintf (3, ("%Ix: got classobj %Ix", obj, class_obj));
23975 uint8_t* temp_class_obj = class_obj;
23976 uint8_t** temp = &temp_class_obj;
23977 relocate_address (temp THREAD_NUMBER_ARG);
23979 check_demotion_helper (temp, obj);
23980 #endif //MULTIPLE_HEAPS
23984 #endif //COLLECTIBLE_CLASS
23987 gc_heap::check_demotion_helper (uint8_t** pval, uint8_t* parent_obj)
23989 // detect if we are demoting an object
23990 if ((*pval < demotion_high) &&
23991 (*pval >= demotion_low))
23993 dprintf(3, ("setting card %Ix:%Ix",
23994 card_of((uint8_t*)pval),
23997 set_card (card_of (parent_obj));
23999 #ifdef MULTIPLE_HEAPS
24000 else if (settings.demotion)
24002 dprintf (4, ("Demotion active, computing heap_of object"));
24003 gc_heap* hp = heap_of (*pval);
24004 if ((*pval < hp->demotion_high) &&
24005 (*pval >= hp->demotion_low))
24007 dprintf(3, ("setting card %Ix:%Ix",
24008 card_of((uint8_t*)pval),
24011 set_card (card_of (parent_obj));
24014 #endif //MULTIPLE_HEAPS
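// Net effect of the helper above: when the relocated target *pval lands in
// the demotion range [demotion_low, demotion_high[, the card covering the
// parent object is set, so a subsequent ephemeral GC will scan the parent
// for pointers into the demoted region just as it does for ordinary
// cross-generation references.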
24018 gc_heap::reloc_survivor_helper (uint8_t** pval)
24021 relocate_address (pval THREAD_NUMBER_ARG);
24023 check_demotion_helper (pval, (uint8_t*)pval);
24027 gc_heap::relocate_obj_helper (uint8_t* x, size_t s)
24030 if (contain_pointers (x))
24032 dprintf (3, ("$%Ix$", (size_t)x));
24034 go_through_object_nostart (method_table(x), x, s, pval,
24036 uint8_t* child = *pval;
24037 reloc_survivor_helper (pval);
24040 dprintf (3, ("%Ix->%Ix->%Ix", (uint8_t*)pval, child, *pval));
24045 check_class_object_demotion (x);
24049 void gc_heap::reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc)
24053 uint8_t* old_val = (address_to_reloc ? *address_to_reloc : 0);
24054 relocate_address (address_to_reloc THREAD_NUMBER_ARG);
24055 if (address_to_reloc)
24057 dprintf (3, ("SR %Ix: %Ix->%Ix", (uint8_t*)address_to_reloc, old_val, *address_to_reloc));
24060 //check_demotion_helper (current_saved_info_to_relocate, (uint8_t*)pval);
24061 uint8_t* relocated_addr = *address_to_reloc;
24062 if ((relocated_addr < demotion_high) &&
24063 (relocated_addr >= demotion_low))
24065 dprintf (3, ("set card for location %Ix(%Ix)",
24066 (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));
24068 set_card (card_of ((uint8_t*)address_to_set_card));
24070 #ifdef MULTIPLE_HEAPS
24071 else if (settings.demotion)
24073 gc_heap* hp = heap_of (relocated_addr);
24074 if ((relocated_addr < hp->demotion_high) &&
24075 (relocated_addr >= hp->demotion_low))
24077 dprintf (3, ("%Ix on h%d, set card for location %Ix(%Ix)",
24078 relocated_addr, hp->heap_number, (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));
24080 set_card (card_of ((uint8_t*)address_to_set_card));
24083 #endif //MULTIPLE_HEAPS
24086 void gc_heap::relocate_pre_plug_info (mark* pinned_plug_entry)
24089 uint8_t* plug = pinned_plug (pinned_plug_entry);
24090 uint8_t* pre_plug_start = plug - sizeof (plug_and_gap);
24091 // Note that we need to add one ptr size here otherwise we may not be able to find the relocated
24092 // address. Consider this scenario:
24093 // gen1 start | 3-ptr sized NP | PP
24095 // If we are asking for the reloc address of 0x10 we will AV in relocate_address because
24096 // the first plug we saw in the brick is 0x18 which means 0x10 will cause us to go back a brick
24097 // which is 0, and then we'll AV in tree_search when we try to do node_right_child (tree).
24098 pre_plug_start += sizeof (uint8_t*);
24099 uint8_t** old_address = &pre_plug_start;
24101 uint8_t* old_val = (old_address ? *old_address : 0);
24102 relocate_address (old_address THREAD_NUMBER_ARG);
24105 dprintf (3, ("PreR %Ix: %Ix->%Ix, set reloc: %Ix",
24106 (uint8_t*)old_address, old_val, *old_address, (pre_plug_start - sizeof (uint8_t*))));
24109 pinned_plug_entry->set_pre_plug_info_reloc_start (pre_plug_start - sizeof (uint8_t*));
24113 void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned)
24116 uint8_t* plug = pinned_plug (pinned_plug_entry);
//// Temporary - we just want to make sure we are doing things right when padding is needed.
24121 //if ((x + s) < plug)
24123 // dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix",
24124 // x, (x + s), (plug- (x + s)), plug));
24125 // GCToOSInterface::DebugBreak();
24128 relocate_pre_plug_info (pinned_plug_entry);
24131 verify_pins_with_post_plug_info("after relocate_pre_plug_info");
24133 uint8_t* saved_plug_info_start = 0;
24134 uint8_t** saved_info_to_relocate = 0;
24138 saved_plug_info_start = (uint8_t*)(pinned_plug_entry->get_post_plug_info_start());
24139 saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
24143 saved_plug_info_start = (plug - sizeof (plug_and_gap));
24144 saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
24147 uint8_t** current_saved_info_to_relocate = 0;
24148 uint8_t* child = 0;
24150 dprintf (3, ("x: %Ix, pp: %Ix, end: %Ix", x, plug, end));
24152 if (contain_pointers (x))
24154 dprintf (3,("$%Ix$", (size_t)x));
24156 go_through_object_nostart (method_table(x), x, s, pval,
24158 dprintf (3, ("obj %Ix, member: %Ix->%Ix", x, (uint8_t*)pval, *pval));
24160 if ((uint8_t*)pval >= end)
24162 current_saved_info_to_relocate = saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**);
24163 child = *current_saved_info_to_relocate;
24164 reloc_ref_in_shortened_obj (pval, current_saved_info_to_relocate);
24165 dprintf (3, ("last part: R-%Ix(saved: %Ix)->%Ix ->%Ix",
24166 (uint8_t*)pval, current_saved_info_to_relocate, child, *current_saved_info_to_relocate));
24170 reloc_survivor_helper (pval);
24175 check_class_object_demotion (x);
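// Sketch of the index math above (illustrative): for a member slot pval that
// falls at or past 'end' (the tail of the object overwritten by saved plug
// info), the real value lives in the saved copy, at
//
//   saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**)
//
// so the saved slot is relocated instead, and the card is set against the
// original location via reloc_ref_in_shortened_obj.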
24178 void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end)
24181 while (x < plug_end)
24183 size_t s = size (x);
24184 uint8_t* next_obj = x + Align (s);
24185 Prefetch (next_obj);
24186 relocate_obj_helper (x, s);
// If we expanded the heap, we are not handling it right now, as we are not
// saving the new reloc info.
24193 void gc_heap::verify_pins_with_post_plug_info (const char* msg)
24195 #if defined (_DEBUG) && defined (VERIFY_HEAP)
24196 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
24198 if (!verify_pinned_queue_p)
24201 if (settings.heap_expansion)
24204 for (size_t i = 0; i < mark_stack_tos; i++)
24206 mark& m = mark_stack_array[i];
24208 mark* pinned_plug_entry = pinned_plug_of(i);
24210 if (pinned_plug_entry->has_post_plug_info() &&
24211 pinned_plug_entry->post_short_p() &&
24212 (pinned_plug_entry->saved_post_plug_debug.gap != 1))
24214 uint8_t* next_obj = pinned_plug_entry->get_post_plug_info_start() + sizeof (plug_and_gap);
24215 // object after pin
24216 dprintf (3, ("OFP: %Ix, G: %Ix, R: %Ix, LC: %d, RC: %d",
24217 next_obj, node_gap_size (next_obj), node_relocation_distance (next_obj),
24218 (int)node_left_child (next_obj), (int)node_right_child (next_obj)));
24220 size_t* post_plug_debug = (size_t*)(&m.saved_post_plug_debug);
24222 if (node_gap_size (next_obj) != *post_plug_debug)
24224 dprintf (3, ("obj: %Ix gap should be %Ix but it is %Ix",
24225 next_obj, *post_plug_debug, (size_t)(node_gap_size (next_obj))));
24229 // can't do node_relocation_distance here as it clears the left bit.
24230 //if (node_relocation_distance (next_obj) != *post_plug_debug)
24231 if (*((size_t*)(next_obj - 3 * sizeof (size_t))) != *post_plug_debug)
24233 dprintf (3, ("obj: %Ix reloc should be %Ix but it is %Ix",
24234 next_obj, *post_plug_debug, (size_t)(node_relocation_distance (next_obj))));
24237 if (node_left_child (next_obj) > 0)
24239 dprintf (3, ("obj: %Ix, vLC: %d\n", next_obj, (int)(node_left_child (next_obj))));
24245 dprintf (3, ("%s verified", msg));
24247 #else // _DEBUG && VERIFY_HEAP
24248 UNREFERENCED_PARAMETER(msg);
24249 #endif // _DEBUG && VERIFY_HEAP
24252 #ifdef COLLECTIBLE_CLASS
// We don't want to burn another pointer-sized field in the pinned plug to
// record this, so just set the card unconditionally for collectible objects
// if we are demoting.
24256 gc_heap::unconditional_set_card_collectible (uint8_t* obj)
24258 if (settings.demotion)
24260 set_card (card_of (obj));
24263 #endif //COLLECTIBLE_CLASS
24265 void gc_heap::relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry)
24268 uint8_t* p_plug = pinned_plug (pinned_plug_entry);
24269 BOOL is_pinned = (plug == p_plug);
24270 BOOL check_short_obj_p = (is_pinned ? pinned_plug_entry->post_short_p() : pinned_plug_entry->pre_short_p());
24272 plug_end += sizeof (gap_reloc_pair);
24274 //dprintf (3, ("%s %Ix is shortened, and last object %s overwritten", (is_pinned ? "PP" : "NP"), plug, (check_short_obj_p ? "is" : "is not")));
24275 dprintf (3, ("%s %Ix-%Ix short, LO: %s OW", (is_pinned ? "PP" : "NP"), plug, plug_end, (check_short_obj_p ? "is" : "is not")));
24277 verify_pins_with_post_plug_info("begin reloc short surv");
24279 while (x < plug_end)
24281 if (check_short_obj_p && ((plug_end - x) < (DWORD)min_pre_pin_obj_size))
24283 dprintf (3, ("last obj %Ix is short", x));
24287 #ifdef COLLECTIBLE_CLASS
24288 if (pinned_plug_entry->post_short_collectible_p())
24289 unconditional_set_card_collectible (x);
24290 #endif //COLLECTIBLE_CLASS
24292 // Relocate the saved references based on bits set.
24293 uint8_t** saved_plug_info_start = (uint8_t**)(pinned_plug_entry->get_post_plug_info_start());
24294 uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
24295 for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
24297 if (pinned_plug_entry->post_short_bit_p (i))
24299 reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
24305 #ifdef COLLECTIBLE_CLASS
24306 if (pinned_plug_entry->pre_short_collectible_p())
24307 unconditional_set_card_collectible (x);
24308 #endif //COLLECTIBLE_CLASS
24310 relocate_pre_plug_info (pinned_plug_entry);
24312 // Relocate the saved references based on bits set.
24313 uint8_t** saved_plug_info_start = (uint8_t**)(p_plug - sizeof (plug_and_gap));
24314 uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
24315 for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
24317 if (pinned_plug_entry->pre_short_bit_p (i))
24319 reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
24327 size_t s = size (x);
24328 uint8_t* next_obj = x + Align (s);
24329 Prefetch (next_obj);
24331 if (next_obj >= plug_end)
24333 dprintf (3, ("object %Ix is at the end of the plug %Ix->%Ix",
24334 next_obj, plug, plug_end));
24336 verify_pins_with_post_plug_info("before reloc short obj");
24338 relocate_shortened_obj_helper (x, s, (x + Align (s) - sizeof (plug_and_gap)), pinned_plug_entry, is_pinned);
24342 relocate_obj_helper (x, s);
24349 verify_pins_with_post_plug_info("end reloc short surv");
24352 void gc_heap::relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
24353 BOOL check_last_object_p,
24354 mark* pinned_plug_entry)
24356 //dprintf(3,("Relocating pointers in Plug [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
24357 dprintf (3,("RP: [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
24359 if (check_last_object_p)
24361 relocate_shortened_survivor_helper (plug, plug_end, pinned_plug_entry);
24365 relocate_survivor_helper (plug, plug_end);
24369 void gc_heap::relocate_survivors_in_brick (uint8_t* tree, relocate_args* args)
24371 assert ((tree != NULL));
24373 dprintf (3, ("tree: %Ix, args->last_plug: %Ix, left: %Ix, right: %Ix, gap(t): %Ix",
24374 tree, args->last_plug,
24375 (tree + node_left_child (tree)),
24376 (tree + node_right_child (tree)),
24377 node_gap_size (tree)));
24379 if (node_left_child (tree))
24381 relocate_survivors_in_brick (tree + node_left_child (tree), args);
24384 uint8_t* plug = tree;
24385 BOOL has_post_plug_info_p = FALSE;
24386 BOOL has_pre_plug_info_p = FALSE;
24388 if (tree == oldest_pinned_plug)
24390 args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24391 &has_post_plug_info_p);
24392 assert (tree == pinned_plug (args->pinned_plug_entry));
24394 dprintf (3, ("tree is the oldest pin: %Ix", tree));
24396 if (args->last_plug)
24398 size_t gap_size = node_gap_size (tree);
24399 uint8_t* gap = (plug - gap_size);
24400 dprintf (3, ("tree: %Ix, gap: %Ix (%Ix)", tree, gap, gap_size));
24401 assert (gap_size >= Align (min_obj_size));
24402 uint8_t* last_plug_end = gap;
24404 BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
24407 relocate_survivors_in_plug (args->last_plug, last_plug_end, check_last_object_p, args->pinned_plug_entry);
24412 assert (!has_pre_plug_info_p);
24415 args->last_plug = plug;
24416 args->is_shortened = has_post_plug_info_p;
24417 if (has_post_plug_info_p)
24419 dprintf (3, ("setting %Ix as shortened", plug));
24421 dprintf (3, ("last_plug: %Ix(shortened: %d)", plug, (args->is_shortened ? 1 : 0)));
24423 if (node_right_child (tree))
24425 relocate_survivors_in_brick (tree + node_right_child (tree), args);
24430 void gc_heap::update_oldest_pinned_plug()
24432 oldest_pinned_plug = (pinned_plug_que_empty_p() ? 0 : pinned_plug (oldest_pin()));
24435 void gc_heap::relocate_survivors (int condemned_gen_number,
24436 uint8_t* first_condemned_address)
24438 generation* condemned_gen = generation_of (condemned_gen_number);
24439 uint8_t* start_address = first_condemned_address;
24440 size_t current_brick = brick_of (start_address);
24441 heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
24443 PREFIX_ASSUME(current_heap_segment != NULL);
24445 uint8_t* end_address = 0;
24447 reset_pinned_queue_bos();
24448 update_oldest_pinned_plug();
24450 end_address = heap_segment_allocated (current_heap_segment);
24452 size_t end_brick = brick_of (end_address - 1);
24453 relocate_args args;
24455 args.high = gc_high;
24456 args.is_shortened = FALSE;
24457 args.pinned_plug_entry = 0;
24458 args.last_plug = 0;
24461 if (current_brick > end_brick)
24463 if (args.last_plug)
24466 assert (!(args.is_shortened));
24467 relocate_survivors_in_plug (args.last_plug,
24468 heap_segment_allocated (current_heap_segment),
24470 args.pinned_plug_entry);
24473 args.last_plug = 0;
24476 if (heap_segment_next_rw (current_heap_segment))
24478 current_heap_segment = heap_segment_next_rw (current_heap_segment);
24479 current_brick = brick_of (heap_segment_mem (current_heap_segment));
24480 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24489 int brick_entry = brick_table [ current_brick ];
24491 if (brick_entry >= 0)
24493 relocate_survivors_in_brick (brick_address (current_brick) +
24502 void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
24504 if (check_last_object_p)
24506 size += sizeof (gap_reloc_pair);
24507 mark* entry = args->pinned_plug_entry;
24509 if (args->is_shortened)
24511 assert (entry->has_post_plug_info());
24512 entry->swap_post_plug_and_saved_for_profiler();
24516 assert (entry->has_pre_plug_info());
24517 entry->swap_pre_plug_and_saved_for_profiler();
24521 ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
24522 STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
24523 ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
24525 (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false);
24527 if (check_last_object_p)
24529 mark* entry = args->pinned_plug_entry;
24531 if (args->is_shortened)
24533 entry->swap_post_plug_and_saved_for_profiler();
24537 entry->swap_pre_plug_and_saved_for_profiler();
24542 void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
24544 assert ((tree != NULL));
24545 if (node_left_child (tree))
24547 walk_relocation_in_brick (tree + node_left_child (tree), args);
24550 uint8_t* plug = tree;
24551 BOOL has_pre_plug_info_p = FALSE;
24552 BOOL has_post_plug_info_p = FALSE;
24554 if (tree == oldest_pinned_plug)
24556 args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
24557 &has_post_plug_info_p);
24558 assert (tree == pinned_plug (args->pinned_plug_entry));
24561 if (args->last_plug != 0)
24563 size_t gap_size = node_gap_size (tree);
24564 uint8_t* gap = (plug - gap_size);
24565 uint8_t* last_plug_end = gap;
24566 size_t last_plug_size = (last_plug_end - args->last_plug);
24567 dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix",
24568 tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));
24570 BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
24571 if (!check_last_object_p)
24573 assert (last_plug_size >= Align (min_obj_size));
24576 walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
24580 assert (!has_pre_plug_info_p);
24583 dprintf (3, ("set args last plug to plug: %Ix", plug));
24584 args->last_plug = plug;
24585 args->is_shortened = has_post_plug_info_p;
24587 if (node_right_child (tree))
24589 walk_relocation_in_brick (tree + node_right_child (tree), args);
24593 void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn)
24595 generation* condemned_gen = generation_of (settings.condemned_generation);
24596 uint8_t* start_address = generation_allocation_start (condemned_gen);
24597 size_t current_brick = brick_of (start_address);
24598 heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
24600 PREFIX_ASSUME(current_heap_segment != NULL);
24602 reset_pinned_queue_bos();
24603 update_oldest_pinned_plug();
24604 size_t end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24605 walk_relocate_args args;
24606 args.is_shortened = FALSE;
24607 args.pinned_plug_entry = 0;
24608 args.last_plug = 0;
24609 args.profiling_context = profiling_context;
24614 if (current_brick > end_brick)
24616 if (args.last_plug)
24618 walk_plug (args.last_plug,
24619 (heap_segment_allocated (current_heap_segment) - args.last_plug),
24622 args.last_plug = 0;
24624 if (heap_segment_next_rw (current_heap_segment))
24626 current_heap_segment = heap_segment_next_rw (current_heap_segment);
24627 current_brick = brick_of (heap_segment_mem (current_heap_segment));
24628 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
24637 int brick_entry = brick_table [ current_brick ];
24638 if (brick_entry >= 0)
24640 walk_relocation_in_brick (brick_address (current_brick) +
24649 void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type)
24651 if (type == walk_for_gc)
24652 walk_survivors_relocation (context, fn);
24653 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24654 else if (type == walk_for_bgc)
24655 walk_survivors_for_bgc (context, fn);
24656 #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
24657 else if (type == walk_for_loh)
24658 walk_survivors_for_loh (context, fn);
24660 assert (!"unknown type!");
24663 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
24664 void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
24666 // This should only be called for BGCs
24667 assert(settings.concurrent);
24669 heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
24671 BOOL small_object_segments = TRUE;
24672 int align_const = get_alignment_constant (small_object_segments);
24678 if (small_object_segments)
24680 //switch to large segment
24681 small_object_segments = FALSE;
24683 align_const = get_alignment_constant (small_object_segments);
24684 seg = heap_segment_rw (generation_start_segment (large_object_generation));
24686 PREFIX_ASSUME(seg != NULL);
24694 uint8_t* o = heap_segment_mem (seg);
24695 uint8_t* end = heap_segment_allocated (seg);
24699 if (method_table(o) == g_gc_pFreeObjectMethodTable)
24701 o += Align (size (o), align_const);
24705 // It's survived. Make a fake plug, starting at o,
24706 // and send the event
24708 uint8_t* plug_start = o;
24710 while (method_table(o) != g_gc_pFreeObjectMethodTable)
24712 o += Align (size (o), align_const);
24719 uint8_t* plug_end = o;
24723 0, // Reloc distance == 0 as this is non-compacting
24725 false, // Non-compacting
24729 seg = heap_segment_next (seg);
24732 #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
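// Shape of the walk above (illustrative): live objects are coalesced into
// "fake plugs" delimited by free objects, and each plug is reported once
// with a relocation distance of 0 since this walk is non-compacting:
//
//   [free][live live live][free][live]...
//          ^plug_start    ^plug_end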
24734 void gc_heap::relocate_phase (int condemned_gen_number,
24735 uint8_t* first_condemned_address)
24738 sc.thread_number = heap_number;
24739 sc.promotion = FALSE;
24740 sc.concurrent = FALSE;
24746 start = GetCycleCount32();
24749 // %type% category = quote (relocate);
24750 dprintf (2,("---- Relocate phase -----"));
24752 #ifdef MULTIPLE_HEAPS
24753 //join all threads to make sure they are synchronized
24754 dprintf(3, ("Joining after end of plan"));
24755 gc_t_join.join(this, gc_join_begin_relocate_phase);
24756 if (gc_t_join.joined())
24757 #endif //MULTIPLE_HEAPS
24760 #ifdef MULTIPLE_HEAPS
24762 //join all threads to make sure they are synchronized
24763 dprintf(3, ("Restarting for relocation"));
24764 gc_t_join.restart();
24765 #endif //MULTIPLE_HEAPS
24768 dprintf(3,("Relocating roots"));
24769 GCScan::GcScanRoots(GCHeap::Relocate,
24770 condemned_gen_number, max_generation, &sc);
24772 verify_pins_with_post_plug_info("after reloc stack");
24774 #ifdef BACKGROUND_GC
24775 if (recursive_gc_sync::background_running_p())
24777 scan_background_roots (GCHeap::Relocate, heap_number, &sc);
24779 #endif //BACKGROUND_GC
24781 if (condemned_gen_number != max_generation)
24783 dprintf(3,("Relocating cross generation pointers"));
24784 mark_through_cards_for_segments (&gc_heap::relocate_address, TRUE);
24785 verify_pins_with_post_plug_info("after reloc cards");
24787 if (condemned_gen_number != max_generation)
24789 dprintf(3,("Relocating cross generation pointers for large objects"));
24790 mark_through_cards_for_large_objects (&gc_heap::relocate_address, TRUE);
24794 #ifdef FEATURE_LOH_COMPACTION
24795 if (loh_compacted_p)
24797 assert (settings.condemned_generation == max_generation);
24798 relocate_in_loh_compact();
24801 #endif //FEATURE_LOH_COMPACTION
24803 relocate_in_large_objects ();
24807 dprintf(3,("Relocating survivors"));
24808 relocate_survivors (condemned_gen_number,
24809 first_condemned_address);
24812 #ifdef FEATURE_PREMORTEM_FINALIZATION
24813 dprintf(3,("Relocating finalization data"));
24814 finalize_queue->RelocateFinalizationData (condemned_gen_number,
24816 #endif // FEATURE_PREMORTEM_FINALIZATION
24821 dprintf(3,("Relocating handle table"));
24822 GCScan::GcScanHandles(GCHeap::Relocate,
24823 condemned_gen_number, max_generation, &sc);
24826 #ifdef MULTIPLE_HEAPS
24827 //join all threads to make sure they are synchronized
24828 dprintf(3, ("Joining after end of relocation"));
24829 gc_t_join.join(this, gc_join_relocate_phase_done);
24831 #endif //MULTIPLE_HEAPS
24834 finish = GetCycleCount32();
24835 reloc_time = finish - start;
24838 dprintf(2,( "---- End of Relocate phase ----"));
24841 // This compares to see if tree is the current pinned plug and returns info
24842 // for this pinned plug. Also advances the pinned queue if that's the case.
24844 // We don't change the values of the plug info if tree is not the same as
24845 // the current pinned plug - the caller is responsible for setting the right
24846 // values to begin with.
// POPO TODO: We are keeping this temporarily as it is also used by realloc,
// which passes FALSE to deque_p; change it to use the same optimization as
// relocate. Not as essential since realloc is already a slow path.
24851 mark* gc_heap::get_next_pinned_entry (uint8_t* tree,
24852 BOOL* has_pre_plug_info_p,
24853 BOOL* has_post_plug_info_p,
24856 if (!pinned_plug_que_empty_p())
24858 mark* oldest_entry = oldest_pin();
24859 uint8_t* oldest_plug = pinned_plug (oldest_entry);
24860 if (tree == oldest_plug)
24862 *has_pre_plug_info_p = oldest_entry->has_pre_plug_info();
24863 *has_post_plug_info_p = oldest_entry->has_post_plug_info();
24867 deque_pinned_plug();
24870 dprintf (3, ("found a pinned plug %Ix, pre: %d, post: %d",
24872 (*has_pre_plug_info_p ? 1 : 0),
24873 (*has_post_plug_info_p ? 1 : 0)));
24875 return oldest_entry;
// This also deques the oldest entry and updates the oldest plug.
24883 mark* gc_heap::get_oldest_pinned_entry (BOOL* has_pre_plug_info_p,
24884 BOOL* has_post_plug_info_p)
24886 mark* oldest_entry = oldest_pin();
24887 *has_pre_plug_info_p = oldest_entry->has_pre_plug_info();
24888 *has_post_plug_info_p = oldest_entry->has_post_plug_info();
24890 deque_pinned_plug();
24891 update_oldest_pinned_plug();
24892 return oldest_entry;
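// The caching contract used by relocate and compact (see the callers above):
// oldest_pinned_plug mirrors pinned_plug (oldest_pin()), so a tree walk only
// compares each plug against that one cached address; only on a match does
// it pay for get_oldest_pinned_entry, which deques the entry and refreshes
// the cache via update_oldest_pinned_plug.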
24896 void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
24899 copy_cards_for_addresses (dest, src, len);
24901 clear_card_for_addresses (dest, dest + len);
// POPO TODO: We should actually just recover the artificially made gaps
// here, because when we copy we always copy the earlier plugs first, which
// means we won't need the gap sizes anymore. This way we won't need to
// individually recover each overwritten part of plugs.
24908 void gc_heap::gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
24912 #ifdef BACKGROUND_GC
24913 if (current_c_gc_state == c_gc_state_marking)
//TODO: we should look at whether to change this to copy a consecutive
// region of the mark array instead.
24917 copy_mark_bits_for_addresses (dest, src, len);
24919 #endif //BACKGROUND_GC
24920 //dprintf(3,(" Memcopy [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
24921 dprintf(3,(" mc: [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
24922 memcopy (dest - plug_skew, src - plug_skew, len);
24923 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
24924 if (SoftwareWriteWatch::IsEnabledForGCHeap())
// The ranges [src - plug_skew .. src[ and [src + len - plug_skew .. src + len[ are ObjHeaders, which don't have GC
24927 // references, and are not relevant for write watch. The latter range actually corresponds to the ObjHeader for the
24928 // object at (src + len), so it can be ignored anyway.
24929 SoftwareWriteWatch::SetDirtyRegion(dest, len - plug_skew);
24931 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
24932 copy_cards_range (dest, src, len, copy_cards_p);
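// Layout moved by the memcopy above (per the write-watch comment): each plug
// is copied together with the plug_skew bytes of object header that sit just
// below its first object,
//
//   src:  [hdr | obj0 obj1 ...[      dest: [hdr | obj0 obj1 ...[
//         ^src - plug_skew                 ^dest - plug_skew
//
// which is why the dirty region for write watch starts at dest and covers
// only len - plug_skew bytes.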
24936 void gc_heap::compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args)
24939 uint8_t* reloc_plug = plug + args->last_plug_relocation;
24941 if (check_last_object_p)
24943 size += sizeof (gap_reloc_pair);
24944 mark* entry = args->pinned_plug_entry;
24946 if (args->is_shortened)
24948 assert (entry->has_post_plug_info());
24949 entry->swap_post_plug_and_saved();
24953 assert (entry->has_pre_plug_info());
24954 entry->swap_pre_plug_and_saved();
24958 int old_brick_entry = brick_table [brick_of (plug)];
24960 assert (node_relocation_distance (plug) == args->last_plug_relocation);
24962 #ifdef FEATURE_STRUCTALIGN
24963 ptrdiff_t alignpad = node_alignpad(plug);
24966 make_unused_array (reloc_plug - alignpad, alignpad);
24967 if (brick_of (reloc_plug - alignpad) != brick_of (reloc_plug))
24969 // The alignment padding is straddling one or more bricks;
24970 // it has to be the last "object" of its first brick.
24971 fix_brick_to_highest (reloc_plug - alignpad, reloc_plug);
24974 #else // FEATURE_STRUCTALIGN
24975 size_t unused_arr_size = 0;
24976 BOOL already_padded_p = FALSE;
24978 if (is_plug_padded (plug))
24980 already_padded_p = TRUE;
24981 clear_plug_padded (plug);
24982 unused_arr_size = Align (min_obj_size);
24984 #endif //SHORT_PLUGS
24985 if (node_realigned (plug))
24987 unused_arr_size += switch_alignment_size (already_padded_p);
24990 if (unused_arr_size != 0)
24992 make_unused_array (reloc_plug - unused_arr_size, unused_arr_size);
24994 if (brick_of (reloc_plug - unused_arr_size) != brick_of (reloc_plug))
24996 dprintf (3, ("fix B for padding: %Id: %Ix->%Ix",
24997 unused_arr_size, (reloc_plug - unused_arr_size), reloc_plug));
24998 // The alignment padding is straddling one or more bricks;
24999 // it has to be the last "object" of its first brick.
25000 fix_brick_to_highest (reloc_plug - unused_arr_size, reloc_plug);
25003 #endif // FEATURE_STRUCTALIGN
25006 if (is_plug_padded (plug))
25008 make_unused_array (reloc_plug - Align (min_obj_size), Align (min_obj_size));
25010 if (brick_of (reloc_plug - Align (min_obj_size)) != brick_of (reloc_plug))
25012 // The alignment padding is straddling one or more bricks;
25013 // it has to be the last "object" of its first brick.
25014 fix_brick_to_highest (reloc_plug - Align (min_obj_size), reloc_plug);
25017 #endif //SHORT_PLUGS
25019 gcmemcopy (reloc_plug, plug, size, args->copy_cards_p);
25021 if (args->check_gennum_p)
25023 int src_gennum = args->src_gennum;
25024 if (src_gennum == -1)
25026 src_gennum = object_gennum (plug);
25029 int dest_gennum = object_gennum_plan (reloc_plug);
25031 if (src_gennum < dest_gennum)
25033 generation_allocation_size (generation_of (dest_gennum)) += size;
25037 size_t current_reloc_brick = args->current_compacted_brick;
25039 if (brick_of (reloc_plug) != current_reloc_brick)
25041 dprintf (3, ("last reloc B: %Ix, current reloc B: %Ix",
25042 current_reloc_brick, brick_of (reloc_plug)));
25044 if (args->before_last_plug)
25046 dprintf (3,(" fixing last brick %Ix to point to last plug %Ix(%Ix)",
25047 current_reloc_brick,
25048 args->before_last_plug,
25049 (args->before_last_plug - brick_address (current_reloc_brick))));
25052 set_brick (current_reloc_brick,
25053 args->before_last_plug - brick_address (current_reloc_brick));
25056 current_reloc_brick = brick_of (reloc_plug);
25058 size_t end_brick = brick_of (reloc_plug + size-1);
25059 if (end_brick != current_reloc_brick)
25061 // The plug is straddling one or more bricks
25062 // It has to be the last plug of its first brick
25063 dprintf (3,("plug spanning multiple bricks, fixing first brick %Ix to %Ix(%Ix)",
25064 current_reloc_brick, (size_t)reloc_plug,
25065 (reloc_plug - brick_address (current_reloc_brick))));
25068 set_brick (current_reloc_brick,
25069 reloc_plug - brick_address (current_reloc_brick));
// update all intervening bricks
25072 size_t brick = current_reloc_brick + 1;
25073 dprintf (3,("setting intervening bricks %Ix->%Ix to -1",
25074 brick, (end_brick - 1)));
25075 while (brick < end_brick)
25077 set_brick (brick, -1);
// encode the last brick offset as a plug address
25081 args->before_last_plug = brick_address (end_brick) -1;
25082 current_reloc_brick = end_brick;
25083 dprintf (3, ("setting before last to %Ix, last brick to %Ix",
25084 args->before_last_plug, current_reloc_brick));
25088 dprintf (3, ("still in the same brick: %Ix", end_brick));
25089 args->before_last_plug = reloc_plug;
25091 args->current_compacted_brick = current_reloc_brick;
25093 if (check_last_object_p)
25095 mark* entry = args->pinned_plug_entry;
25097 if (args->is_shortened)
25099 entry->swap_post_plug_and_saved();
25103 entry->swap_pre_plug_and_saved();
25108 void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args)
25110 assert (tree != NULL);
25111 int left_node = node_left_child (tree);
25112 int right_node = node_right_child (tree);
25113 ptrdiff_t relocation = node_relocation_distance (tree);
25119 dprintf (3, ("B: L: %d->%Ix", left_node, (tree + left_node)));
25120 compact_in_brick ((tree + left_node), args);
25123 uint8_t* plug = tree;
25124 BOOL has_pre_plug_info_p = FALSE;
25125 BOOL has_post_plug_info_p = FALSE;
25127 if (tree == oldest_pinned_plug)
25129 args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
25130 &has_post_plug_info_p);
25131 assert (tree == pinned_plug (args->pinned_plug_entry));
25134 if (args->last_plug != 0)
25136 size_t gap_size = node_gap_size (tree);
25137 uint8_t* gap = (plug - gap_size);
25138 uint8_t* last_plug_end = gap;
25139 size_t last_plug_size = (last_plug_end - args->last_plug);
25140 dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix",
25141 tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));
25143 BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
25144 if (!check_last_object_p)
25146 assert (last_plug_size >= Align (min_obj_size));
25149 compact_plug (args->last_plug, last_plug_size, check_last_object_p, args);
25153 assert (!has_pre_plug_info_p);
25156 dprintf (3, ("set args last plug to plug: %Ix, reloc: %Ix", plug, relocation));
25157 args->last_plug = plug;
25158 args->last_plug_relocation = relocation;
25159 args->is_shortened = has_post_plug_info_p;
25163 dprintf (3, ("B: R: %d->%Ix", right_node, (tree + right_node)));
25164 compact_in_brick ((tree + right_node), args);
25168 void gc_heap::recover_saved_pinned_info()
25170 reset_pinned_queue_bos();
25172 while (!(pinned_plug_que_empty_p()))
25174 mark* oldest_entry = oldest_pin();
25175 oldest_entry->recover_plug_info();
25176 #ifdef GC_CONFIG_DRIVEN
25177 if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info())
25178 record_interesting_data_point (idp_pre_and_post_pin);
25179 else if (oldest_entry->has_pre_plug_info())
25180 record_interesting_data_point (idp_pre_pin);
25181 else if (oldest_entry->has_post_plug_info())
25182 record_interesting_data_point (idp_post_pin);
25183 #endif //GC_CONFIG_DRIVEN
25185 deque_pinned_plug();
25189 void gc_heap::compact_phase (int condemned_gen_number,
25190 uint8_t* first_condemned_address,
25193 // %type% category = quote (compact);
25197 start = GetCycleCount32();
25199 generation* condemned_gen = generation_of (condemned_gen_number);
25200 uint8_t* start_address = first_condemned_address;
25201 size_t current_brick = brick_of (start_address);
25202 heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
25204 PREFIX_ASSUME(current_heap_segment != NULL);
25206 reset_pinned_queue_bos();
25207 update_oldest_pinned_plug();
25209 BOOL reused_seg = expand_reused_seg_p();
25212 for (int i = 1; i <= max_generation; i++)
25214 generation_allocation_size (generation_of (i)) = 0;
25218 uint8_t* end_address = heap_segment_allocated (current_heap_segment);
25220 size_t end_brick = brick_of (end_address-1);
25222 args.last_plug = 0;
25223 args.before_last_plug = 0;
25224 args.current_compacted_brick = ~((size_t)1);
25225 args.is_shortened = FALSE;
25226 args.pinned_plug_entry = 0;
25227 args.copy_cards_p = (condemned_gen_number >= 1) || !clear_cards;
25228 args.check_gennum_p = reused_seg;
25229 if (args.check_gennum_p)
25231 args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
25234 dprintf (2,("---- Compact Phase: %Ix(%Ix)----",
25235 first_condemned_address, brick_of (first_condemned_address)));
25237 #ifdef MULTIPLE_HEAPS
25239 if (gc_t_join.joined())
25241 #endif //MULTIPLE_HEAPS
25243 #ifdef MULTIPLE_HEAPS
25244 dprintf(3, ("Restarting for compaction"));
25245 gc_t_join.restart();
25247 #endif //MULTIPLE_HEAPS
25249 reset_pinned_queue_bos();
25251 #ifdef FEATURE_LOH_COMPACTION
25252 if (loh_compacted_p)
25256 #endif //FEATURE_LOH_COMPACTION
25258 if ((start_address < end_address) ||
25259 (condemned_gen_number == max_generation))
25263 if (current_brick > end_brick)
25265 if (args.last_plug != 0)
25267 dprintf (3, ("compacting last plug: %Ix", args.last_plug));
25268 compact_plug (args.last_plug,
25269 (heap_segment_allocated (current_heap_segment) - args.last_plug),
25274 if (heap_segment_next_rw (current_heap_segment))
25276 current_heap_segment = heap_segment_next_rw (current_heap_segment);
25277 current_brick = brick_of (heap_segment_mem (current_heap_segment));
25278 end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
25279 args.last_plug = 0;
25280 if (args.check_gennum_p)
25282 args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
25288 if (args.before_last_plug !=0)
25290 dprintf (3, ("Fixing last brick %Ix to point to plug %Ix",
25291 args.current_compacted_brick, (size_t)args.before_last_plug));
25292 assert (args.current_compacted_brick != ~((size_t)1));
25293 set_brick (args.current_compacted_brick,
25294 args.before_last_plug - brick_address (args.current_compacted_brick));
25300 int brick_entry = brick_table [ current_brick ];
25301 dprintf (3, ("B: %Ix(%Ix)->%Ix",
25302 current_brick, (size_t)brick_entry, (brick_address (current_brick) + brick_entry - 1)));
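// A non-negative brick entry encodes the root of this brick's plug tree as
// (offset + 1) from the brick's base address - hence the "- 1" below;
// negative entries merely refer back to an earlier brick, so those bricks
// have no tree of their own to compact.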
25304 if (brick_entry >= 0)
25306 compact_in_brick ((brick_address (current_brick) + brick_entry -1),
25315 recover_saved_pinned_info();
25318 finish = GetCycleCount32();
25319 compact_time = finish - start;
25322 concurrent_print_time_delta ("compact end");
25324 dprintf(2,("---- End of Compact phase ----"));
25327 #ifdef MULTIPLE_HEAPS
25330 #pragma warning(push)
25331 #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
25333 void gc_heap::gc_thread_stub (void* arg)
25335 gc_heap* heap = (gc_heap*)arg;
25336 if (!gc_thread_no_affinitize_p)
25338 // We are about to set affinity for GC threads. It is a good place to set up NUMA and
25339 // CPU groups because the process mask, processor number, and group number are all
25340 // readily available.
25341 set_thread_affinity_for_heap(heap->heap_number);
25344 // server GC threads run at a higher priority than normal threads.
25345 GCToOSInterface::BoostThreadPriority();
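// Stagger each GC thread's stack by a heap-number-dependent amount,
// presumably so the threads' hot stack frames don't all land on the same
// cache/TLB sets.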
25346 _alloca (256*heap->heap_number);
25347 heap->gc_thread_function();
25350 #pragma warning(pop)
25353 #endif //MULTIPLE_HEAPS
25355 #ifdef BACKGROUND_GC
25358 #pragma warning(push)
25359 #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
25361 void gc_heap::bgc_thread_stub (void* arg)
25363 gc_heap* heap = (gc_heap*)arg;
25364 heap->bgc_thread = GCToEEInterface::GetThread();
25365 assert(heap->bgc_thread != nullptr);
25366 heap->bgc_thread_function();
25369 #pragma warning(pop)
25372 #endif //BACKGROUND_GC
25374 /*------------------ Background GC ----------------------------*/
25376 #ifdef BACKGROUND_GC
25378 void gc_heap::background_drain_mark_list (int thread)
25380 UNREFERENCED_PARAMETER(thread);
25382 size_t saved_c_mark_list_index = c_mark_list_index;
25384 if (saved_c_mark_list_index)
25386 concurrent_print_time_delta ("SML");
25388 while (c_mark_list_index != 0)
25390 size_t current_index = c_mark_list_index - 1;
25391 uint8_t* o = c_mark_list [current_index];
25392 background_mark_object (o THREAD_NUMBER_ARG);
25393 c_mark_list_index--;
25395 if (saved_c_mark_list_index)
25398 concurrent_print_time_delta ("EML");
25401 fire_drain_mark_list_event (saved_c_mark_list_index);
25405 // The background GC version of scan_dependent_handles (see that method for a more in-depth comment).
25406 #ifdef MULTIPLE_HEAPS
25407 // Since we only scan dependent handles while we are stopped we'll never interfere with FGCs scanning
25408 // them. So we can use the same static variables.
25409 void gc_heap::background_scan_dependent_handles (ScanContext *sc)
25411 // Whenever we call this method there may have been preceding object promotions. So set
25412 // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
25413 // based on how the scanning proceeded).
25414 s_fUnscannedPromotions = TRUE;
25416 // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
25417 // the state of this thread's portion of the dependent handle table. That's because promotions on other
25418 // threads could cause handle promotions to become necessary here. Even if there are definitely no more
25419 // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
25420 // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
25421 // as all the others or they'll get out of step).
25424 // The various worker threads are all currently racing in this code. We need to work out if at least
25425 // one of them thinks it has work to do this cycle. Each thread needs to rescan its portion of the
25426 // dependent handle table when both of the following conditions apply:
25427 // 1) At least one (arbitrary) object might have been promoted since the last scan (because if this
25428 // object happens to correspond to a primary in one of our handles we might potentially have to
25429 // promote the associated secondary).
25430 // 2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
25432 // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
25433 // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
25434 // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
25435 // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
25436 // follows below. Note that we can't read this outside of the join since on any iteration apart from
25437 // the first threads will be racing between reading this value and completing their previous
25438 // iteration's table scan.
25440 // The second condition is tracked by the dependent handle code itself on a per worker thread basis
25441 // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
25442 // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
25443 // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
25444 // we're safely joined.
25445 if (GCScan::GcDhUnpromotedHandlesExist(sc))
25446 s_fUnpromotedHandles = TRUE;
25448 // Synchronize all the threads so we can read our state variables safely. The following shared
25449 // variable (indicating whether we should scan the tables or terminate the loop) will be set by a
25450 // single thread inside the join.
25451 bgc_t_join.join(this, gc_join_scan_dependent_handles);
25452 if (bgc_t_join.joined())
25454 // We're synchronized so it's safe to read our shared state variables. We update another shared
25455 // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
25456 // the loop. We scan if there has been at least one object promotion since last time and at least
25457 // one thread has a dependent handle table with a potential handle promotion possible.
25458 s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
25460 // Reset our shared state variables (ready to be set again on this scan or with a good initial
25461 // value for the next call if we're terminating the loop).
25462 s_fUnscannedPromotions = FALSE;
25463 s_fUnpromotedHandles = FALSE;
25465 if (!s_fScanRequired)
25467 uint8_t* all_heaps_max = 0;
25468 uint8_t* all_heaps_min = MAX_PTR;
25470 for (i = 0; i < n_heaps; i++)
25472 if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
25473 all_heaps_max = g_heaps[i]->background_max_overflow_address;
25474 if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
25475 all_heaps_min = g_heaps[i]->background_min_overflow_address;
25477 for (i = 0; i < n_heaps; i++)
25479 g_heaps[i]->background_max_overflow_address = all_heaps_max;
25480 g_heaps[i]->background_min_overflow_address = all_heaps_min;
25484 // Restart all the workers.
25485 dprintf(2, ("Starting all gc thread mark stack overflow processing"));
25486 bgc_t_join.restart();
25489 // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
25490 // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
25491 // global flag indicating that at least one object promotion may have occurred (the usual comment
25492 // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
25493 // exit the method since we unconditionally set this variable on method entry anyway).
25494 if (background_process_mark_overflow (sc->concurrent))
25495 s_fUnscannedPromotions = TRUE;
25497 // If we decided that no scan was required we can terminate the loop now.
25498 if (!s_fScanRequired)
25501 // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
25502 // processed before we start scanning dependent handle tables (if overflows remain while we scan we
25503 // could miss noting the promotion of some primary objects).
25504 bgc_t_join.join(this, gc_join_rescan_dependent_handles);
25505 if (bgc_t_join.joined())
25507 // Restart all the workers.
25508 dprintf(3, ("Starting all gc thread for dependent handle promotion"));
25509 bgc_t_join.restart();
25512 // If the portion of the dependent handle table managed by this worker has handles that could still be
25513 // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
25514 // could require a rescan of handles on this or other workers.
25515 if (GCScan::GcDhUnpromotedHandlesExist(sc))
25516 if (GCScan::GcDhReScan(sc))
25517 s_fUnscannedPromotions = TRUE;
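// Condensed shape of the scanning loop above (illustrative only, not
// compiled; join()/restart()/joined_as_last_thread stand in for the
// bgc_t_join calls, and the overflow-address merging done in the first
// join is omitted):
#if 0
for (;;)
{
    if (GCScan::GcDhUnpromotedHandlesExist (sc))
        s_fUnpromotedHandles = TRUE;            // racy OR; read only in the join

    join ();                                    // all workers rendezvous
    if (joined_as_last_thread)
    {
        s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;
        s_fUnscannedPromotions = FALSE;         // reset for the next cycle
        s_fUnpromotedHandles = FALSE;
        restart ();
    }

    if (background_process_mark_overflow (sc->concurrent))
        s_fUnscannedPromotions = TRUE;          // overflow implies promotions

    if (!s_fScanRequired)                       // decision computed in the join
        break;

    join ();                                    // ensure all overflows are done
    if (joined_as_last_thread)
        restart ();

    if (GCScan::GcDhUnpromotedHandlesExist (sc) && GCScan::GcDhReScan (sc))
        s_fUnscannedPromotions = TRUE;          // a rescan promoted a secondary
}
#endif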
25521 void gc_heap::background_scan_dependent_handles (ScanContext *sc)
25523 // Whenever we call this method there may have been preceding object promotions. So set
25524 // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
25525 // based on how the scanning proceeded).
25526 bool fUnscannedPromotions = true;
25528 // Scan dependent handles repeatedly until there are no further promotions that can be made or we made a
25529 // scan without performing any new promotions.
25530 while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
25532 // On each iteration of the loop start with the assumption that no further objects have been promoted.
25533 fUnscannedPromotions = false;
25535 // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
25536 // being visible. If there was an overflow (background_process_mark_overflow returned true) then
25537 // additional objects now appear to be promoted and we should set the flag.
25538 if (background_process_mark_overflow (sc->concurrent))
25539 fUnscannedPromotions = true;
25541 // Perform the scan and set the flag if any promotions resulted.
25542 if (GCScan::GcDhReScan (sc))
25543 fUnscannedPromotions = true;
25546 // Perform a last processing of any overflowed mark stack.
25547 background_process_mark_overflow (sc->concurrent);
25549 #endif //MULTIPLE_HEAPS
25551 void gc_heap::recover_bgc_settings()
25553 if ((settings.condemned_generation < max_generation) && recursive_gc_sync::background_running_p())
25555 dprintf (2, ("restoring bgc settings"));
25556 settings = saved_bgc_settings;
25557 GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
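// If the EE has a suspension pending, briefly switch the BGC thread to
// preemptive mode so the suspension (e.g. for a foreground GC) can proceed,
// then switch back.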
25561 void gc_heap::allow_fgc()
25563 assert (bgc_thread == GCToEEInterface::GetThread());
25564 bool bToggleGC = false;
25566 if (g_fSuspensionPending > 0)
25568 bToggleGC = GCToEEInterface::EnablePreemptiveGC();
25571 GCToEEInterface::DisablePreemptiveGC();
25576 BOOL gc_heap::should_commit_mark_array()
25578 return (recursive_gc_sync::background_running_p() || (current_bgc_state == bgc_initialized));
25581 void gc_heap::clear_commit_flag()
25583 generation* gen = generation_of (max_generation);
25584 heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25589 if (gen != large_object_generation)
25591 gen = large_object_generation;
25592 seg = heap_segment_in_range (generation_start_segment (gen));
25600 if (seg->flags & heap_segment_flags_ma_committed)
25602 seg->flags &= ~heap_segment_flags_ma_committed;
25605 if (seg->flags & heap_segment_flags_ma_pcommitted)
25607 seg->flags &= ~heap_segment_flags_ma_pcommitted;
25610 seg = heap_segment_next (seg);
25614 void gc_heap::clear_commit_flag_global()
25616 #ifdef MULTIPLE_HEAPS
25617 for (int i = 0; i < n_heaps; i++)
25619 g_heaps[i]->clear_commit_flag();
25622 clear_commit_flag();
25623 #endif //MULTIPLE_HEAPS
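// clear_commit_flag above - like commit_mark_array_bgc_init and
// commit_new_mark_array below - uses the file's standard two-list segment
// walk: traverse the gen2 segment list, then switch once to the LOH list.
// The idiom, condensed (illustrative only, not compiled):
#if 0
generation* gen = generation_of (max_generation);
heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
while (1)
{
    if (seg == 0)
    {
        if (gen != large_object_generation)
        {
            gen = large_object_generation;      // SOH done - switch to LOH
            seg = heap_segment_in_range (generation_start_segment (gen));
        }
        else
            break;                              // both lists walked
    }
    else
    {
        // ... per-segment work ...
        seg = heap_segment_next (seg);
    }
}
#endif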
25626 void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
25629 size_t markw = mark_word_of (begin);
25630 size_t markw_end = mark_word_of (end);
25632 while (markw < markw_end)
25634 if (mark_array_addr[markw])
25636 dprintf (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
25637 markw, mark_array_addr[markw], mark_word_address (markw)));
25643 UNREFERENCED_PARAMETER(begin);
25644 UNREFERENCED_PARAMETER(end);
25645 UNREFERENCED_PARAMETER(mark_array_addr);
25649 void gc_heap::verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr)
25651 verify_mark_array_cleared (heap_segment_mem (seg), heap_segment_reserved (seg), mark_array_addr);
25654 BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
25656 uint32_t* new_card_table,
25657 uint8_t* new_lowest_address)
25659 UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced
25661 uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25662 uint8_t* end = heap_segment_reserved (seg);
25664 uint8_t* lowest = hp->background_saved_lowest_address;
25665 uint8_t* highest = hp->background_saved_highest_address;
25667 uint8_t* commit_start = NULL;
25668 uint8_t* commit_end = NULL;
25669 size_t commit_flag = 0;
25671 if ((highest >= start) &&
25674 if ((start >= lowest) && (end <= highest))
25676 dprintf (GC_TABLE_LOG, ("completely in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
25677 start, end, lowest, highest));
25678 commit_flag = heap_segment_flags_ma_committed;
25682 dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
25683 start, end, lowest, highest));
25684 commit_flag = heap_segment_flags_ma_pcommitted;
25687 commit_start = max (lowest, start);
25688 commit_end = min (highest, end);
25690 if (!commit_mark_array_by_range (commit_start, commit_end, hp->mark_array))
25695 if (new_card_table == 0)
25697 new_card_table = g_gc_card_table;
25700 if (hp->card_table != new_card_table)
25702 if (new_lowest_address == 0)
25704 new_lowest_address = g_gc_lowest_address;
25707 uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))];
25708 uint32_t* ma = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, new_lowest_address));
25710 dprintf (GC_TABLE_LOG, ("table realloc-ed: %Ix->%Ix, MA: %Ix->%Ix",
25711 hp->card_table, new_card_table,
25712 hp->mark_array, ma));
25714 if (!commit_mark_array_by_range (commit_start, commit_end, ma))
25720 seg->flags |= commit_flag;
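// The flag choice above is an interval test against the saved BGC range:
// a segment fully inside [lowest, highest] gets ma_committed, one that only
// overlaps it gets ma_pcommitted, and the committed span is clamped to the
// overlap. Condensed (illustrative only, not compiled; the elided half of
// the overlap test is assumed to be (lowest <= end)):
#if 0
if ((highest >= start) && (lowest <= end))           // any overlap at all?
{
    size_t flag = ((start >= lowest) && (end <= highest))
        ? heap_segment_flags_ma_committed            // fully covered
        : heap_segment_flags_ma_pcommitted;          // partially covered
    uint8_t* commit_start = max (lowest, start);     // clamp to the overlap
    uint8_t* commit_end = min (highest, end);
    // commit_mark_array_by_range (commit_start, commit_end, ...);
}
#endif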
25726 BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
25728 size_t beg_word = mark_word_of (begin);
25729 size_t end_word = mark_word_of (align_on_mark_word (end));
25730 uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]);
25731 uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]);
25732 size_t size = (size_t)(commit_end - commit_start);
25734 #ifdef SIMPLE_DPRINTF
25735 dprintf (GC_TABLE_LOG, ("range: %Ix->%Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), commit %Ix->%Ix(%Id)",
25737 beg_word, end_word,
25738 (end_word - beg_word) * sizeof (uint32_t),
25739 &mark_array_addr[beg_word],
25740 &mark_array_addr[end_word],
25741 (size_t)(&mark_array_addr[end_word] - &mark_array_addr[beg_word]),
25742 commit_start, commit_end,
25744 #endif //SIMPLE_DPRINTF
25746 if (virtual_commit (commit_start, size))
25748 // We can only verify that the mark array is cleared from begin to end; the first and the last
25749 // page aren't necessarily all cleared 'cause they could be used by other segments or
25751 verify_mark_array_cleared (begin, end, mark_array_addr);
25756 dprintf (GC_TABLE_LOG, ("failed to commit %Id bytes", (end_word - beg_word) * sizeof (uint32_t)));
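// Note the direction of the page rounding above: commit rounds *outward*
// (align_lower_page on the start, align_on_page on the end) so every mark
// word in [beg_word, end_word) lands on a committed page. The decommit path
// in decommit_mark_array_by_seg below rounds *inward* instead, so pages that
// may also hold a neighboring segment's mark words are never released.
// Side by side (illustrative only, not compiled; ma = the mark array):
#if 0
// commit: cover the word range completely
uint8_t* c_start = align_lower_page ((uint8_t*)&ma[beg_word]);  // round down
uint8_t* c_end   = align_on_page    ((uint8_t*)&ma[end_word]);  // round up
// decommit: release only pages wholly owned by the word range
uint8_t* d_start = align_on_page    ((uint8_t*)&ma[beg_word]);  // round up
uint8_t* d_end   = align_lower_page ((uint8_t*)&ma[end_word]);  // round down
// for a short range d_start can exceed d_end, in which case nothing is freed
#endif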
25761 BOOL gc_heap::commit_mark_array_with_check (heap_segment* seg, uint32_t* new_mark_array_addr)
25763 uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25764 uint8_t* end = heap_segment_reserved (seg);
25766 #ifdef MULTIPLE_HEAPS
25767 uint8_t* lowest = heap_segment_heap (seg)->background_saved_lowest_address;
25768 uint8_t* highest = heap_segment_heap (seg)->background_saved_highest_address;
25770 uint8_t* lowest = background_saved_lowest_address;
25771 uint8_t* highest = background_saved_highest_address;
25772 #endif //MULTIPLE_HEAPS
25774 if ((highest >= start) &&
25777 start = max (lowest, start);
25778 end = min (highest, end);
25779 if (!commit_mark_array_by_range (start, end, new_mark_array_addr))
25788 BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr)
25790 dprintf (GC_TABLE_LOG, ("seg: %Ix->%Ix; MA: %Ix",
25792 heap_segment_reserved (seg),
25794 uint8_t* start = (heap_segment_read_only_p (seg) ? heap_segment_mem (seg) : (uint8_t*)seg);
25796 return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr);
25799 BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
25801 UNREFERENCED_PARAMETER(mark_array_addr);
25803 dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix",
25804 lowest_address, highest_address, mark_array));
25806 generation* gen = generation_of (max_generation);
25807 heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25812 if (gen != large_object_generation)
25814 gen = large_object_generation;
25815 seg = heap_segment_in_range (generation_start_segment (gen));
25823 dprintf (GC_TABLE_LOG, ("seg: %Ix, flags: %Id", seg, seg->flags));
25825 if (!(seg->flags & heap_segment_flags_ma_committed))
25827 // ro segments could always be only partially in range, so we'd
25828 // be calling this at the beginning of every BGC. We are not making this
25829 // more efficient right now - ro segments are currently only used by redhawk.
25830 if (heap_segment_read_only_p (seg))
25832 if ((heap_segment_mem (seg) >= lowest_address) &&
25833 (heap_segment_reserved (seg) <= highest_address))
25835 if (commit_mark_array_by_seg (seg, mark_array))
25837 seg->flags |= heap_segment_flags_ma_committed;
25846 uint8_t* start = max (lowest_address, heap_segment_mem (seg));
25847 uint8_t* end = min (highest_address, heap_segment_reserved (seg));
25848 if (commit_mark_array_by_range (start, end, mark_array))
25850 seg->flags |= heap_segment_flags_ma_pcommitted;
25860 // Normal segments are by design completely in range, so just
25861 // commit the whole mark array for each seg.
25862 if (commit_mark_array_by_seg (seg, mark_array))
25864 if (seg->flags & heap_segment_flags_ma_pcommitted)
25866 seg->flags &= ~heap_segment_flags_ma_pcommitted;
25868 seg->flags |= heap_segment_flags_ma_committed;
25877 seg = heap_segment_next (seg);
25883 // This function doesn't check the commit flag since it's for a new array -
25884 // the mark_array flag for these segments will remain the same.
25885 BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr)
25887 dprintf (GC_TABLE_LOG, ("committing existing segs on MA %Ix", new_mark_array_addr));
25888 generation* gen = generation_of (max_generation);
25889 heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
25894 if (gen != large_object_generation)
25896 gen = large_object_generation;
25897 seg = heap_segment_in_range (generation_start_segment (gen));
25905 if (!commit_mark_array_with_check (seg, new_mark_array_addr))
25910 seg = heap_segment_next (seg);
25913 #ifdef MULTIPLE_HEAPS
25914 if (new_heap_segment)
25916 if (!commit_mark_array_with_check (new_heap_segment, new_mark_array_addr))
25921 #endif //MULTIPLE_HEAPS
25926 BOOL gc_heap::commit_new_mark_array_global (uint32_t* new_mark_array)
25928 #ifdef MULTIPLE_HEAPS
25929 for (int i = 0; i < n_heaps; i++)
25931 if (!g_heaps[i]->commit_new_mark_array (new_mark_array))
25937 if (!commit_new_mark_array (new_mark_array))
25941 #endif //MULTIPLE_HEAPS
25946 void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
25948 // if BGC is disabled (the finalize watchdog does this at shutdown), the mark array could have
25949 // been set to NULL.
25950 if (mark_array == NULL)
25955 dprintf (GC_TABLE_LOG, ("decommitting seg %Ix(%Ix), MA: %Ix", seg, seg->flags, mark_array));
25957 size_t flags = seg->flags;
25959 if ((flags & heap_segment_flags_ma_committed) ||
25960 (flags & heap_segment_flags_ma_pcommitted))
25962 uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
25963 uint8_t* end = heap_segment_reserved (seg);
25965 if (flags & heap_segment_flags_ma_pcommitted)
25967 start = max (lowest_address, start);
25968 end = min (highest_address, end);
25971 size_t beg_word = mark_word_of (start);
25972 size_t end_word = mark_word_of (align_on_mark_word (end));
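// Round inward here - the opposite of commit_mark_array_by_range - so that
// partial pages possibly shared with a neighboring segment's mark words are
// never decommitted.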
25973 uint8_t* decommit_start = align_on_page ((uint8_t*)&mark_array[beg_word]);
25974 uint8_t* decommit_end = align_lower_page ((uint8_t*)&mark_array[end_word]);
25975 size_t size = (size_t)(decommit_end - decommit_start);
25977 #ifdef SIMPLE_DPRINTF
25978 dprintf (GC_TABLE_LOG, ("seg: %Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), decommit %Ix->%Ix(%Id)",
25980 beg_word, end_word,
25981 (end_word - beg_word) * sizeof (uint32_t),
25982 &mark_array[beg_word],
25983 &mark_array[end_word],
25984 (size_t)(&mark_array[end_word] - &mark_array[beg_word]),
25985 decommit_start, decommit_end,
25987 #endif //SIMPLE_DPRINTF
25989 if (decommit_start < decommit_end)
25991 if (!virtual_decommit (decommit_start, size))
25993 dprintf (GC_TABLE_LOG, ("decommit on %Ix for %Id bytes failed",
25994 decommit_start, size));
25995 assert (!"decommit failed");
25999 dprintf (GC_TABLE_LOG, ("decommitted [%Ix for address [%Ix", beg_word, seg));
26003 void gc_heap::background_mark_phase ()
26005 verify_mark_array_cleared();
26008 sc.thread_number = heap_number;
26009 sc.promotion = TRUE;
26010 sc.concurrent = FALSE;
26013 BOOL cooperative_mode = TRUE;
26014 #ifndef MULTIPLE_HEAPS
26015 const int thread = heap_number;
26016 #endif //!MULTIPLE_HEAPS
26018 dprintf(2,("-(GC%d)BMark-", VolatileLoad(&settings.gc_index)));
26020 assert (settings.concurrent);
26025 start = GetCycleCount32();
26028 #ifdef FFIND_OBJECT
26029 if (gen0_must_clear_bricks > 0)
26030 gen0_must_clear_bricks--;
26031 #endif //FFIND_OBJECT
26033 background_soh_alloc_count = 0;
26034 background_loh_alloc_count = 0;
26035 bgc_overflow_count = 0;
26037 bpromoted_bytes (heap_number) = 0;
26038 static uint32_t num_sizedrefs = 0;
26040 background_min_overflow_address = MAX_PTR;
26041 background_max_overflow_address = 0;
26042 background_min_soh_overflow_address = MAX_PTR;
26043 background_max_soh_overflow_address = 0;
26044 processed_soh_overflow_p = FALSE;
26047 //set up the mark lists from g_mark_list
26048 assert (g_mark_list);
26049 mark_list = g_mark_list;
26050 //don't use the mark list for full gc
26051 //because multiple segments are more complex to handle and the list
26052 //is likely to overflow
26053 mark_list_end = &mark_list [0];
26054 mark_list_index = &mark_list [0];
26056 c_mark_list_index = 0;
26058 #ifndef MULTIPLE_HEAPS
26059 shigh = (uint8_t*) 0;
26061 #endif //MULTIPLE_HEAPS
26063 generation* gen = generation_of (max_generation);
26065 dprintf(3,("BGC: stack marking"));
26066 sc.concurrent = TRUE;
26068 GCScan::GcScanRoots(background_promote_callback,
26069 max_generation, max_generation,
26074 dprintf(3,("BGC: finalization marking"));
26075 finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0);
26078 size_t total_loh_size = generation_size (max_generation + 1);
26079 bgc_begin_loh_size = total_loh_size;
26080 bgc_alloc_spin_loh = 0;
26081 bgc_loh_size_increased = 0;
26082 bgc_loh_allocated_in_free = 0;
26083 size_t total_soh_size = generation_sizes (generation_of (max_generation));
26085 dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
26088 //concurrent_print_time_delta ("copying stack roots");
26089 concurrent_print_time_delta ("CS");
26091 FIRE_EVENT(BGC1stNonConEnd);
26093 expanded_in_fgc = FALSE;
26094 saved_overflow_ephemeral_seg = 0;
26095 current_bgc_state = bgc_reset_ww;
26097 // we don't need a join here - just whichever thread gets here
26098 // first can change the states and call restart_vm.
26099 // this is not true - we can't let the EE run while we are scanning the stack.
26100 // since we now allow resetting write watch to run concurrently and have a join for it,
26101 // we can do restart_ee on the 1st thread that gets here. Make sure we handle the
26102 // sizedref handles correctly.
26103 #ifdef MULTIPLE_HEAPS
26104 bgc_t_join.join(this, gc_join_restart_ee);
26105 if (bgc_t_join.joined())
26106 #endif //MULTIPLE_HEAPS
26108 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26109 // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset
26110 // can be done while the runtime is suspended or after the runtime is restarted; the preference was to reset while
26111 // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below.
26113 concurrent_print_time_delta ("CRWW begin");
26115 #ifdef MULTIPLE_HEAPS
26116 for (int i = 0; i < n_heaps; i++)
26118 g_heaps[i]->reset_write_watch (FALSE);
26121 reset_write_watch (FALSE);
26122 #endif //MULTIPLE_HEAPS
26124 concurrent_print_time_delta ("CRWW");
26125 #endif //WRITE_WATCH
26126 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26128 num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
26130 // this c_write is not really necessary because restart_vm
26131 // has an instruction that will flush the cpu cache (an interlocked
26132 // operation or the like) but we don't want to rely on that.
26133 dprintf (BGC_LOG, ("setting cm_in_progress"));
26134 c_write (cm_in_progress, TRUE);
26136 //restart all threads, doing the marking from the array
26137 assert (dont_restart_ee_p);
26138 dont_restart_ee_p = FALSE;
26141 GCToOSInterface::YieldThread (0);
26142 #ifdef MULTIPLE_HEAPS
26143 dprintf(3, ("Starting all gc threads for gc"));
26144 bgc_t_join.restart();
26145 #endif //MULTIPLE_HEAPS
26148 #ifdef MULTIPLE_HEAPS
26149 bgc_t_join.join(this, gc_join_after_reset);
26150 if (bgc_t_join.joined())
26151 #endif //MULTIPLE_HEAPS
26153 disable_preemptive (true);
26155 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26156 // When software write watch is enabled, resetting write watch is done while the runtime is suspended above. The
26157 // post-reset call to revisit_written_pages is only necessary for concurrent reset_write_watch, to discard dirtied
26158 // pages during the concurrent reset.
26161 concurrent_print_time_delta ("CRWW begin");
26163 #ifdef MULTIPLE_HEAPS
26164 for (int i = 0; i < n_heaps; i++)
26166 g_heaps[i]->reset_write_watch (TRUE);
26169 reset_write_watch (TRUE);
26170 #endif //MULTIPLE_HEAPS
26172 concurrent_print_time_delta ("CRWW");
26173 #endif //WRITE_WATCH
26175 #ifdef MULTIPLE_HEAPS
26176 for (int i = 0; i < n_heaps; i++)
26178 g_heaps[i]->revisit_written_pages (TRUE, TRUE);
26181 revisit_written_pages (TRUE, TRUE);
26182 #endif //MULTIPLE_HEAPS
26184 concurrent_print_time_delta ("CRW");
26185 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26187 #ifdef MULTIPLE_HEAPS
26188 for (int i = 0; i < n_heaps; i++)
26190 g_heaps[i]->current_bgc_state = bgc_mark_handles;
26193 current_bgc_state = bgc_mark_handles;
26194 #endif //MULTIPLE_HEAPS
26196 current_c_gc_state = c_gc_state_marking;
26198 enable_preemptive ();
26200 #ifdef MULTIPLE_HEAPS
26201 dprintf(3, ("Joining BGC threads after resetting writewatch"));
26202 bgc_t_join.restart();
26203 #endif //MULTIPLE_HEAPS
26206 disable_preemptive (true);
26208 if (num_sizedrefs > 0)
26210 GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc);
26212 enable_preemptive ();
26214 #ifdef MULTIPLE_HEAPS
26215 bgc_t_join.join(this, gc_join_scan_sizedref_done);
26216 if (bgc_t_join.joined())
26218 dprintf(3, ("Done with marking all sized refs. Starting all bgc thread for marking other strong roots"));
26219 bgc_t_join.restart();
26221 #endif //MULTIPLE_HEAPS
26223 disable_preemptive (true);
26226 dprintf (3,("BGC: handle table marking"));
26227 GCScan::GcScanHandles(background_promote,
26228 max_generation, max_generation,
26230 //concurrent_print_time_delta ("concurrent marking handle table");
26231 concurrent_print_time_delta ("CRH");
26233 current_bgc_state = bgc_mark_stack;
26234 dprintf (2,("concurrent draining mark list"));
26235 background_drain_mark_list (thread);
26236 //concurrent_print_time_delta ("concurrent marking stack roots");
26237 concurrent_print_time_delta ("CRS");
26239 dprintf (2,("concurrent revisiting dirtied pages"));
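// The dirtied pages are revisited twice here; presumably the second pass
// picks up pages that were dirtied while the first pass was running.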
26240 revisit_written_pages (TRUE);
26241 revisit_written_pages (TRUE);
26242 //concurrent_print_time_delta ("concurrent marking dirtied pages on LOH");
26243 concurrent_print_time_delta ("CRre");
26245 enable_preemptive ();
26247 #ifdef MULTIPLE_HEAPS
26248 bgc_t_join.join(this, gc_join_concurrent_overflow);
26249 if (bgc_t_join.joined())
26251 uint8_t* all_heaps_max = 0;
26252 uint8_t* all_heaps_min = MAX_PTR;
26254 for (i = 0; i < n_heaps; i++)
26256 dprintf (3, ("heap %d overflow max is %Ix, min is %Ix",
26258 g_heaps[i]->background_max_overflow_address,
26259 g_heaps[i]->background_min_overflow_address));
26260 if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
26261 all_heaps_max = g_heaps[i]->background_max_overflow_address;
26262 if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
26263 all_heaps_min = g_heaps[i]->background_min_overflow_address;
26265 for (i = 0; i < n_heaps; i++)
26267 g_heaps[i]->background_max_overflow_address = all_heaps_max;
26268 g_heaps[i]->background_min_overflow_address = all_heaps_min;
26270 dprintf(3, ("Starting all bgc threads after updating the overflow info"));
26271 bgc_t_join.restart();
26273 #endif //MULTIPLE_HEAPS
26275 disable_preemptive (true);
26277 dprintf (2, ("before CRov count: %d", bgc_overflow_count));
26278 bgc_overflow_count = 0;
26279 background_process_mark_overflow (TRUE);
26280 dprintf (2, ("after CRov count: %d", bgc_overflow_count));
26281 bgc_overflow_count = 0;
26282 //concurrent_print_time_delta ("concurrent processing mark overflow");
26283 concurrent_print_time_delta ("CRov");
26285 // Stop all threads, crawl all stacks and revisit changed pages.
26286 FIRE_EVENT(BGC1stConEnd);
26288 dprintf (2, ("Stopping the EE"));
26290 enable_preemptive ();
26292 #ifdef MULTIPLE_HEAPS
26293 bgc_t_join.join(this, gc_join_suspend_ee);
26294 if (bgc_t_join.joined())
26296 bgc_threads_sync_event.Reset();
26298 dprintf(3, ("Joining BGC threads for non concurrent final marking"));
26299 bgc_t_join.restart();
26301 #endif //MULTIPLE_HEAPS
26303 if (heap_number == 0)
26305 enter_spin_lock (&gc_lock);
26309 bgc_threads_sync_event.Set();
26313 bgc_threads_sync_event.Wait(INFINITE, FALSE);
26314 dprintf (2, ("bgc_threads_sync_event is signalled"));
26317 assert (settings.concurrent);
26318 assert (settings.condemned_generation == max_generation);
26320 dprintf (2, ("clearing cm_in_progress"));
26321 c_write (cm_in_progress, FALSE);
26323 bgc_alloc_lock->check();
26325 current_bgc_state = bgc_final_marking;
26327 //concurrent_print_time_delta ("concurrent marking ended");
26328 concurrent_print_time_delta ("CR");
26330 FIRE_EVENT(BGC2ndNonConBegin);
26332 mark_absorb_new_alloc();
26334 // We need a join here 'cause find_object would complain if the gen0
26335 // bricks of another heap haven't been fixed up. So we need to make sure
26336 // that every heap's gen0 bricks are fixed up before we proceed.
26337 #ifdef MULTIPLE_HEAPS
26338 bgc_t_join.join(this, gc_join_after_absorb);
26339 if (bgc_t_join.joined())
26341 dprintf(3, ("Joining BGC threads after absorb"));
26342 bgc_t_join.restart();
26344 #endif //MULTIPLE_HEAPS
26346 // give VM a chance to do work
26347 GCToEEInterface::GcBeforeBGCSweepWork();
26349 //reset the flag, indicating that the EE no longer expects concurrent
26351 sc.concurrent = FALSE;
26353 total_loh_size = generation_size (max_generation + 1);
26354 total_soh_size = generation_sizes (generation_of (max_generation));
26356 dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
26358 dprintf (2, ("nonconcurrent marking stack roots"));
26359 GCScan::GcScanRoots(background_promote,
26360 max_generation, max_generation,
26362 //concurrent_print_time_delta ("nonconcurrent marking stack roots");
26363 concurrent_print_time_delta ("NRS");
26365 // finalize_queue->EnterFinalizeLock();
26366 finalize_queue->GcScanRoots(background_promote, heap_number, 0);
26367 // finalize_queue->LeaveFinalizeLock();
26369 dprintf (2, ("nonconcurrent marking handle table"));
26370 GCScan::GcScanHandles(background_promote,
26371 max_generation, max_generation,
26373 //concurrent_print_time_delta ("nonconcurrent marking handle table");
26374 concurrent_print_time_delta ("NRH");
26376 dprintf (2,("---- (GC%d)final going through written pages ----", VolatileLoad(&settings.gc_index)));
26377 revisit_written_pages (FALSE);
26378 //concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH");
26379 concurrent_print_time_delta ("NRre LOH");
26381 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26382 #ifdef MULTIPLE_HEAPS
26383 bgc_t_join.join(this, gc_join_disable_software_write_watch);
26384 if (bgc_t_join.joined())
26385 #endif // MULTIPLE_HEAPS
26387 // The runtime is suspended, and we will be doing a final query of dirty pages, so pause tracking written pages to
26388 // avoid further perf penalty after the runtime is restarted
26389 SoftwareWriteWatch::DisableForGCHeap();
26391 #ifdef MULTIPLE_HEAPS
26392 dprintf(3, ("Restarting BGC threads after disabling software write watch"));
26393 bgc_t_join.restart();
26394 #endif // MULTIPLE_HEAPS
26396 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26398 dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count));
26399 bgc_overflow_count = 0;
26401 // Dependent handles need to be scanned with a special algorithm (see the header comment on
26402 // scan_dependent_handles for more detail). We perform an initial scan without processing any mark
26403 // stack overflow. This is not guaranteed to complete the operation but in a common case (where there
26404 // are no dependent handles that are due to be collected) it allows us to optimize away further scans.
26405 // The call to background_scan_dependent_handles is what will cycle through more iterations if
26406 // required and will also perform processing of any mark stack overflow once the dependent handle
26407 // table has been fully promoted.
26408 dprintf (2, ("1st dependent handle scan and process mark overflow"));
26409 GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc);
26410 background_scan_dependent_handles (&sc);
26411 //concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow");
26412 concurrent_print_time_delta ("NR 1st Hov");
26414 dprintf (2, ("after NR 1st Hov count: %d", bgc_overflow_count));
26415 bgc_overflow_count = 0;
26417 #ifdef MULTIPLE_HEAPS
26418 bgc_t_join.join(this, gc_join_null_dead_short_weak);
26419 if (bgc_t_join.joined())
26420 #endif //MULTIPLE_HEAPS
26422 GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc);
26424 #ifdef MULTIPLE_HEAPS
26425 dprintf(3, ("Joining BGC threads for short weak handle scan"));
26426 bgc_t_join.restart();
26427 #endif //MULTIPLE_HEAPS
26430 // null out the targets of short weakrefs that were not promoted.
26431 GCScan::GcShortWeakPtrScan(background_promote, max_generation, max_generation,&sc);
26433 //concurrent_print_time_delta ("bgc GcShortWeakPtrScan");
26434 concurrent_print_time_delta ("NR GcShortWeakPtrScan");
26438 #ifdef MULTIPLE_HEAPS
26439 bgc_t_join.join(this, gc_join_scan_finalization);
26440 if (bgc_t_join.joined())
26442 dprintf(3, ("Joining BGC threads for finalization"));
26443 bgc_t_join.restart();
26445 #endif //MULTIPLE_HEAPS
26447 //Handle finalization.
26448 dprintf(3,("Marking finalization data"));
26449 //concurrent_print_time_delta ("bgc joined to mark finalization");
26450 concurrent_print_time_delta ("NRj");
26452 // finalize_queue->EnterFinalizeLock();
26453 finalize_queue->ScanForFinalization (background_promote, max_generation, FALSE, __this);
26454 // finalize_queue->LeaveFinalizeLock();
26456 concurrent_print_time_delta ("NRF");
26459 dprintf (2, ("before NR 2nd Hov count: %d", bgc_overflow_count));
26460 bgc_overflow_count = 0;
26462 // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
26463 // for finalization. As before background_scan_dependent_handles will also process any mark stack
26465 dprintf (2, ("2nd dependent handle scan and process mark overflow"));
26466 background_scan_dependent_handles (&sc);
26467 //concurrent_print_time_delta ("2nd nonconcurrent dependent handle scan and process mark overflow");
26468 concurrent_print_time_delta ("NR 2nd Hov");
26470 #ifdef MULTIPLE_HEAPS
26471 bgc_t_join.join(this, gc_join_null_dead_long_weak);
26472 if (bgc_t_join.joined())
26474 dprintf(2, ("Joining BGC threads for weak pointer deletion"));
26475 bgc_t_join.restart();
26477 #endif //MULTIPLE_HEAPS
26479 // null out the targets of long weakrefs that were not promoted.
26480 GCScan::GcWeakPtrScan (background_promote, max_generation, max_generation, &sc);
26481 concurrent_print_time_delta ("NR GcWeakPtrScan");
26483 #ifdef MULTIPLE_HEAPS
26484 bgc_t_join.join(this, gc_join_null_dead_syncblk);
26485 if (bgc_t_join.joined())
26486 #endif //MULTIPLE_HEAPS
26488 dprintf (2, ("calling GcWeakPtrScanBySingleThread"));
26489 // scan for deleted entries in the syncblk cache
26490 GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc);
26491 concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread");
26492 #ifdef MULTIPLE_HEAPS
26493 dprintf(2, ("Starting BGC threads for end of background mark phase"));
26494 bgc_t_join.restart();
26495 #endif //MULTIPLE_HEAPS
26498 gen0_bricks_cleared = FALSE;
26500 dprintf (2, ("end of bgc mark: loh: %d, soh: %d",
26501 generation_size (max_generation + 1),
26502 generation_sizes (generation_of (max_generation))));
26504 for (int gen_idx = max_generation; gen_idx <= (max_generation + 1); gen_idx++)
26506 generation* gen = generation_of (gen_idx);
26507 dynamic_data* dd = dynamic_data_of (gen_idx);
26508 dd_begin_data_size (dd) = generation_size (gen_idx) -
26509 (generation_free_list_space (gen) + generation_free_obj_space (gen)) -
26510 Align (size (generation_allocation_start (gen)));
26511 dd_survived_size (dd) = 0;
26512 dd_pinned_survived_size (dd) = 0;
26513 dd_artificial_pinned_survived_size (dd) = 0;
26514 dd_added_pinned_size (dd) = 0;
26517 heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
26518 PREFIX_ASSUME(seg != NULL);
26522 seg->flags &= ~heap_segment_flags_swept;
26524 if (heap_segment_allocated (seg) == heap_segment_mem (seg))
26526 // This can't happen...
26530 if (seg == ephemeral_heap_segment)
26532 heap_segment_background_allocated (seg) = generation_allocation_start (generation_of (max_generation - 1));
26536 heap_segment_background_allocated (seg) = heap_segment_allocated (seg);
26539 dprintf (2, ("seg %Ix background allocated is %Ix",
26540 heap_segment_mem (seg),
26541 heap_segment_background_allocated (seg)));
26542 seg = heap_segment_next_rw (seg);
26545 // We need to void alloc contexts here 'cause while background_ephemeral_sweep is running
26546 // we can't let the user code consume the leftover parts in these alloc contexts.
26547 repair_allocation_contexts (FALSE);
26550 finish = GetCycleCount32();
26551 mark_time = finish - start;
26554 dprintf (2, ("end of bgc mark: gen2 free list space: %d, free obj space: %d",
26555 generation_free_list_space (generation_of (max_generation)),
26556 generation_free_obj_space (generation_of (max_generation))));
26558 dprintf(2,("---- (GC%d)End of background mark phase ----", VolatileLoad(&settings.gc_index)));
26562 gc_heap::suspend_EE ()
26564 dprintf (2, ("suspend_EE"));
26565 #ifdef MULTIPLE_HEAPS
26566 gc_heap* hp = gc_heap::g_heaps[0];
26567 GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26569 GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26570 #endif //MULTIPLE_HEAPS
26573 #ifdef MULTIPLE_HEAPS
26575 gc_heap::bgc_suspend_EE ()
26577 for (int i = 0; i < n_heaps; i++)
26579 gc_heap::g_heaps[i]->reset_gc_done();
26582 dprintf (2, ("bgc_suspend_EE"));
26583 GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26585 gc_started = FALSE;
26586 for (int i = 0; i < n_heaps; i++)
26588 gc_heap::g_heaps[i]->set_gc_done();
26593 gc_heap::bgc_suspend_EE ()
26597 dprintf (2, ("bgc_suspend_EE"));
26598 GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
26599 gc_started = FALSE;
26602 #endif //MULTIPLE_HEAPS
26605 gc_heap::restart_EE ()
26607 dprintf (2, ("restart_EE"));
26608 #ifdef MULTIPLE_HEAPS
26609 GCToEEInterface::RestartEE(FALSE);
26611 GCToEEInterface::RestartEE(FALSE);
26612 #endif //MULTIPLE_HEAPS
26615 inline uint8_t* gc_heap::high_page ( heap_segment* seg, BOOL concurrent_p)
26619 uint8_t* end = ((seg == ephemeral_heap_segment) ?
26620 generation_allocation_start (generation_of (max_generation-1)) :
26621 heap_segment_allocated (seg));
26622 return align_lower_page (end);
26626 return heap_segment_allocated (seg);
26630 void gc_heap::revisit_written_page (uint8_t* page,
26634 uint8_t*& last_page,
26635 uint8_t*& last_object,
26636 BOOL large_objects_p,
26637 size_t& num_marked_objects)
26639 UNREFERENCED_PARAMETER(seg);
26641 uint8_t* start_address = page;
26643 int align_const = get_alignment_constant (!large_objects_p);
26644 uint8_t* high_address = end;
26645 uint8_t* current_lowest_address = background_saved_lowest_address;
26646 uint8_t* current_highest_address = background_saved_highest_address;
26647 BOOL no_more_loop_p = FALSE;
26650 #ifndef MULTIPLE_HEAPS
26651 const int thread = heap_number;
26652 #endif //!MULTIPLE_HEAPS
26654 if (large_objects_p)
26660 if (((last_page + WRITE_WATCH_UNIT_SIZE) == page)
26661 || (start_address <= last_object))
26667 o = find_first_object (start_address, last_object);
26668 // We can visit the same object again, but on a different page.
26669 assert (o >= last_object);
26673 dprintf (3,("page %Ix start: %Ix, %Ix[ ",
26674 (size_t)page, (size_t)o,
26675 (size_t)(min (high_address, page + WRITE_WATCH_UNIT_SIZE))));
26677 while (o < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
26681 if (concurrent_p && large_objects_p)
26683 bgc_alloc_lock->bgc_mark_set (o);
26685 if (((CObjectHeader*)o)->IsFree())
26687 s = unused_array_size (o);
26699 dprintf (3,("Considering object %Ix(%s)", (size_t)o, (background_object_marked (o, FALSE) ? "bm" : "nbm")));
26701 assert (Align (s) >= Align (min_obj_size));
26703 uint8_t* next_o = o + Align (s, align_const);
26705 if (next_o >= start_address)
26707 #ifdef MULTIPLE_HEAPS
26710 // We set last_object here for SVR BGC because SVR BGC has more than
26711 // one GC thread. When we have more than one GC thread we would run into this
26712 // situation if we skipped unmarked objects:
26713 // bgc thread 1 calls GWW, and detect object X not marked so it would skip it
26715 // bgc thread 2 marks X and all its current children.
26716 // user thread comes along and dirties more (and later) pages in X.
26717 // bgc thread 1 calls GWW again and gets those later pages but it will not mark anything
26718 // on them because it had already skipped X. We need to detect that this object is now
26719 // marked and mark the children on the dirtied pages.
26720 // In the future, if we have fewer BGC threads than heaps, we should extend
26721 // this check to account for the number of BGC threads.
26724 #endif //MULTIPLE_HEAPS
26726 if (contain_pointers (o) &&
26727 (!((o >= current_lowest_address) && (o < current_highest_address)) ||
26728 background_marked (o)))
26730 dprintf (3, ("going through %Ix", (size_t)o));
26731 go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s),
26732 if ((uint8_t*)poo >= min (high_address, page + WRITE_WATCH_UNIT_SIZE))
26734 no_more_loop_p = TRUE;
26737 uint8_t* oo = *poo;
26739 num_marked_objects++;
26740 background_mark_object (oo THREAD_NUMBER_ARG);
26745 #ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP // see comment below
26747 #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26748 ((CObjectHeader*)o)->IsFree() &&
26749 (next_o > min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
26751 // We need to not skip the object here because of this corner scenario:
26752 // A large object was being allocated during BGC mark so we first made it
26753 // into a free object, then cleared its memory. In this loop we would detect
26754 // that it's a free object which normally we would skip. But by the next time
26755 // we call GetWriteWatch we could still be on this object and the object had
26756 // been made into a valid object and some of its memory was changed. We need
26757 // to be sure to process those written pages so we can't skip the object just
26760 // Similarly, when using software write watch, don't advance last_object when
26761 // the current object is a free object that spans beyond the current page or
26762 // high_address. Software write watch acquires gc_lock before the concurrent
26763 // GetWriteWatch() call during revisit_written_pages(). A foreground GC may
26764 // happen at that point and allocate from this free region, so when
26765 // revisit_written_pages() continues, it cannot skip now-valid objects in this
26767 no_more_loop_p = TRUE;
26772 if (concurrent_p && large_objects_p)
26774 bgc_alloc_lock->bgc_mark_done ();
26776 if (no_more_loop_p)
26783 #ifdef MULTIPLE_HEAPS
26786 assert (last_object < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)));
26789 #endif //MULTIPLE_HEAPS
26794 dprintf (3,("Last object: %Ix", (size_t)last_object));
26795 last_page = align_write_watch_lower_page (o);
26798 // When reset_only_p is TRUE, we should only reset pages that are in range,
26799 // because we need to consider the segments, or parts of segments, that were
26800 // allocated out of range as all live.
26801 void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
26804 if (concurrent_p && !reset_only_p)
26806 current_bgc_state = bgc_revisit_soh;
26809 size_t total_dirtied_pages = 0;
26810 size_t total_marked_objects = 0;
26812 heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
26814 PREFIX_ASSUME(seg != NULL);
26816 bool reset_watch_state = !!concurrent_p;
26817 bool is_runtime_suspended = !concurrent_p;
26818 BOOL small_object_segments = TRUE;
26819 int align_const = get_alignment_constant (small_object_segments);
26825 if (small_object_segments)
26827 //switch to large segment
26828 if (concurrent_p && !reset_only_p)
26830 current_bgc_state = bgc_revisit_loh;
26835 dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
26836 fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
26837 concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH");
26838 total_dirtied_pages = 0;
26839 total_marked_objects = 0;
26842 small_object_segments = FALSE;
26843 //concurrent_print_time_delta (concurrent_p ? "concurrent marking dirtied pages on SOH" : "nonconcurrent marking dirtied pages on SOH");
26845 dprintf (3, ("now revisiting large object segments"));
26846 align_const = get_alignment_constant (small_object_segments);
26847 seg = heap_segment_rw (generation_start_segment (large_object_generation));
26849 PREFIX_ASSUME(seg != NULL);
26857 dprintf (GTC_LOG, ("h%d: tdp: %Id", heap_number, total_dirtied_pages));
26861 dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
26862 fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
26867 uint8_t* base_address = (uint8_t*)heap_segment_mem (seg);
26868 //we need to truncate to the base of the page because
26869 //some newly allocated objects could exist beyond heap_segment_allocated
26870 //and if we reset the last page's write watch status,
26871 //they wouldn't be guaranteed to be visited -> gc hole.
26872 uintptr_t bcount = array_size;
26873 uint8_t* last_page = 0;
26874 uint8_t* last_object = heap_segment_mem (seg);
26875 uint8_t* high_address = 0;
26877 BOOL skip_seg_p = FALSE;
26881 if ((heap_segment_mem (seg) >= background_saved_lowest_address) ||
26882 (heap_segment_reserved (seg) <= background_saved_highest_address))
26884 dprintf (3, ("h%d: sseg: %Ix(-%Ix)", heap_number,
26885 heap_segment_mem (seg), heap_segment_reserved (seg)));
26892 dprintf (3, ("looking at seg %Ix", (size_t)last_object));
26896 base_address = max (base_address, background_saved_lowest_address);
26897 dprintf (3, ("h%d: reset only starting %Ix", heap_number, base_address));
26900 dprintf (3, ("h%d: starting: %Ix, seg %Ix-%Ix", heap_number, base_address,
26901 heap_segment_mem (seg), heap_segment_reserved (seg)));
26908 high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
26909 high_address = min (high_address, background_saved_highest_address);
26913 high_address = high_page (seg, concurrent_p);
26916 if ((base_address < high_address) &&
26917 (bcount >= array_size))
26919 ptrdiff_t region_size = high_address - base_address;
26920 dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size));
26922 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26923 // When the runtime is not suspended, it's possible for the table to be resized concurrently with the scan
26924 // for dirty pages below. Prevent that by synchronizing with grow_brick_card_tables(). When the runtime is
26925 // suspended, it's ok to scan for dirty pages concurrently from multiple background GC threads for disjoint
26927 if (!is_runtime_suspended)
26929 enter_spin_lock(&gc_lock);
26931 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26933 get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size,
26934 (void**)background_written_addresses,
26935 &bcount, is_runtime_suspended);
26937 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26938 if (!is_runtime_suspended)
26940 leave_spin_lock(&gc_lock);
26942 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
26946 total_dirtied_pages += bcount;
26948 dprintf (3, ("Found %d pages [%Ix, %Ix[",
26949 bcount, (size_t)base_address, (size_t)high_address));
26954 for (unsigned i = 0; i < bcount; i++)
26956 uint8_t* page = (uint8_t*)background_written_addresses[i];
26957 dprintf (3, ("looking at page %d at %Ix(h: %Ix)", i,
26958 (size_t)page, (size_t)high_address));
26959 if (page < high_address)
26961 //search for marked objects in the page
26962 revisit_written_page (page, high_address, concurrent_p,
26963 seg, last_page, last_object,
26964 !small_object_segments,
26965 total_marked_objects);
26969 dprintf (3, ("page %d at %Ix is >= %Ix!", i, (size_t)page, (size_t)high_address));
26970 assert (!"page shouldn't have exceeded limit");
26975 if (bcount >= array_size){
26976 base_address = background_written_addresses [array_size-1] + WRITE_WATCH_UNIT_SIZE;
26977 bcount = array_size;
26987 seg = heap_segment_next_rw (seg);
26990 #endif //WRITE_WATCH
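// revisit_written_pages consumes the dirty-page information in fixed-size
// batches: each query fills background_written_addresses with at most
// array_size page addresses, and a full batch means the region may have more
// dirty pages, so the scan resumes just past the last page returned. The
// driving loop, condensed (illustrative only, not compiled; get_write_watch
// and visit_page stand in for the real calls):
#if 0
uintptr_t bcount = array_size;                      // in: capacity, out: count
while ((base_address < high_address) && (bcount >= array_size))
{
    get_write_watch (base_address, high_address - base_address,
                     written_pages, &bcount);
    for (uintptr_t i = 0; i < bcount; i++)
        visit_page (written_pages[i]);              // revisit_written_page (...)
    if (bcount >= array_size)
    {
        // full batch - there may be more; continue after the last page seen
        base_address = written_pages[array_size - 1] + WRITE_WATCH_UNIT_SIZE;
        bcount = array_size;                        // restore the capacity
    }
}
#endif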
26993 void gc_heap::background_grow_c_mark_list()
26995 assert (c_mark_list_index >= c_mark_list_length);
26996 BOOL should_drain_p = FALSE;
26998 #ifndef MULTIPLE_HEAPS
26999 const int thread = heap_number;
27000 #endif //!MULTIPLE_HEAPS
27002 dprintf (2, ("stack copy buffer overflow"));
27003 uint8_t** new_c_mark_list = 0;
27006 if (c_mark_list_length >= (SIZE_T_MAX / (2 * sizeof (uint8_t*))))
27008 should_drain_p = TRUE;
27012 new_c_mark_list = new (nothrow) uint8_t*[c_mark_list_length*2];
27013 if (new_c_mark_list == 0)
27015 should_drain_p = TRUE;
27019 if (should_drain_p)
27022 dprintf (2, ("No more memory for the stacks copy, draining.."));
27023 //drain the list by marking its elements
27024 background_drain_mark_list (thread);
27028 assert (new_c_mark_list);
27029 memcpy (new_c_mark_list, c_mark_list, c_mark_list_length*sizeof(uint8_t*));
27030 c_mark_list_length = c_mark_list_length*2;
27031 delete [] c_mark_list;
27032 c_mark_list = new_c_mark_list;
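// Growth policy above: double the buffer, but if doubling would overflow the
// size computation or the allocation fails, drain the list in place instead
// of growing it. A standalone sketch (illustrative only, not compiled):
#if 0
#include <cstdint>
#include <cstring>
#include <new>

// Returns true if the list was grown; false means the caller should drain it.
bool grow_mark_list (uint8_t**& list, size_t& length)
{
    if (length >= (SIZE_MAX / (2 * sizeof (uint8_t*))))
        return false;                                // 2x would overflow size_t
    uint8_t** bigger = new (std::nothrow) uint8_t*[length * 2];
    if (bigger == nullptr)
        return false;                                // OOM - drain instead
    memcpy (bigger, list, length * sizeof (uint8_t*));
    delete [] list;
    list = bigger;
    length *= 2;
    return true;
}
#endif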
27036 void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc,
27039 UNREFERENCED_PARAMETER(sc);
27040 //in order to save space on the array, mark the object,
27041 //knowing that it will be visited later
27042 assert (settings.concurrent);
27044 THREAD_NUMBER_FROM_CONTEXT;
27045 #ifndef MULTIPLE_HEAPS
27046 const int thread = 0;
27047 #endif //!MULTIPLE_HEAPS
27049 uint8_t* o = (uint8_t*)*ppObject;
27056 gc_heap* hp = gc_heap::heap_of (o);
27058 if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
27063 #ifdef INTERIOR_POINTERS
27064 if (flags & GC_CALL_INTERIOR)
27066 o = hp->find_object (o, hp->background_saved_lowest_address);
27070 #endif //INTERIOR_POINTERS
27072 #ifdef FEATURE_CONSERVATIVE_GC
27073 // For conservative GC, a value on the stack may point to the middle of a free object.
27074 // In this case, we don't need to promote the pointer.
27075 if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
27079 #endif //FEATURE_CONSERVATIVE_GC
27082 ((CObjectHeader*)o)->Validate();
27085 dprintf (3, ("Concurrent Background Promote %Ix", (size_t)o));
27086 if (o && (size (o) > loh_size_threshold))
27088 dprintf (3, ("Brc %Ix", (size_t)o));
27091 if (hpt->c_mark_list_index >= hpt->c_mark_list_length)
27093 hpt->background_grow_c_mark_list();
27095 dprintf (3, ("pushing %08x into mark_list", (size_t)o));
27096 hpt->c_mark_list [hpt->c_mark_list_index++] = o;
27098 STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, " GCHeap::Background Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
27101 void gc_heap::mark_absorb_new_alloc()
27103 fix_allocation_contexts (FALSE);
27105 gen0_bricks_cleared = FALSE;
27107 clear_gen0_bricks();
27110 BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
27112 BOOL success = FALSE;
27113 BOOL thread_created = FALSE;
27114 dprintf (2, ("Preparing gc thread"));
27115 gh->bgc_threads_timeout_cs.Enter();
27116 if (!(gh->bgc_thread_running))
27118 dprintf (2, ("GC thread not running"));
27119 if ((gh->bgc_thread == 0) && create_bgc_thread(gh))
27122 thread_created = TRUE;
27127 dprintf (3, ("GC thread already running"));
27130 gh->bgc_threads_timeout_cs.Leave();
27133 FIRE_EVENT(GCCreateConcurrentThread_V1);
27138 BOOL gc_heap::create_bgc_thread(gc_heap* gh)
27140 assert (background_gc_done_event.IsValid());
27142 //dprintf (2, ("Creating BGC thread"));
27144 gh->bgc_thread_running = GCToEEInterface::CreateThread(gh->bgc_thread_stub, gh, true, ".NET Background GC");
27145 return gh->bgc_thread_running;
27148 BOOL gc_heap::create_bgc_threads_support (int number_of_heaps)
27151 dprintf (3, ("Creating concurrent GC thread for the first time"));
27152 if (!background_gc_done_event.CreateManualEventNoThrow(TRUE))
27156 if (!bgc_threads_sync_event.CreateManualEventNoThrow(FALSE))
27160 if (!ee_proceed_event.CreateAutoEventNoThrow(FALSE))
27164 if (!bgc_start_event.CreateManualEventNoThrow(FALSE))
27169 #ifdef MULTIPLE_HEAPS
27170 bgc_t_join.init (number_of_heaps, join_flavor_bgc);
27172 UNREFERENCED_PARAMETER(number_of_heaps);
27173 #endif //MULTIPLE_HEAPS
27181 if (background_gc_done_event.IsValid())
27183 background_gc_done_event.CloseEvent();
27185 if (bgc_threads_sync_event.IsValid())
27187 bgc_threads_sync_event.CloseEvent();
27189 if (ee_proceed_event.IsValid())
27191 ee_proceed_event.CloseEvent();
27193 if (bgc_start_event.IsValid())
27195 bgc_start_event.CloseEvent();
27202 BOOL gc_heap::create_bgc_thread_support()
27207 if (!gc_lh_block_event.CreateManualEventNoThrow(FALSE))
27212 //needs room for the maximum number of minimally-sized objects that fit on a page
27213 parr = new (nothrow) uint8_t*[1 + OS_PAGE_SIZE / MIN_OBJECT_SIZE];
27219 make_c_mark_list (parr);
27227 if (gc_lh_block_event.IsValid())
27229 gc_lh_block_event.CloseEvent();
27236 int gc_heap::check_for_ephemeral_alloc()
27238 int gen = ((settings.reason == reason_oos_soh) ? (max_generation - 1) : -1);
27242 #ifdef MULTIPLE_HEAPS
27243 for (int heap_index = 0; heap_index < n_heaps; heap_index++)
27244 #endif //MULTIPLE_HEAPS
27246 for (int i = 0; i <= (max_generation - 1); i++)
27248 #ifdef MULTIPLE_HEAPS
27249 if (g_heaps[heap_index]->get_new_allocation (i) <= 0)
27251 if (get_new_allocation (i) <= 0)
27252 #endif //MULTIPLE_HEAPS
27254 gen = max (gen, i);
27265 // Wait for gc to finish sequential part
27266 void gc_heap::wait_to_proceed()
27268 assert (background_gc_done_event.IsValid());
27269 assert (bgc_start_event.IsValid());
27271 user_thread_wait(&ee_proceed_event, FALSE);
27274 // Start a new concurrent gc
27275 void gc_heap::start_c_gc()
27277 assert (background_gc_done_event.IsValid());
27278 assert (bgc_start_event.IsValid());
27280 //Need to make sure that the gc thread is in the right place.
27281 background_gc_done_event.Wait(INFINITE, FALSE);
27282 background_gc_done_event.Reset();
27283 bgc_start_event.Set();
27286 void gc_heap::do_background_gc()
27288 dprintf (2, ("starting a BGC"));
27289 #ifdef MULTIPLE_HEAPS
27290 for (int i = 0; i < n_heaps; i++)
27292 g_heaps[i]->init_background_gc();
27295 init_background_gc();
27296 #endif //MULTIPLE_HEAPS
27297 //start the background gc
27300 //wait until we get restarted by the BGC.
27304 void gc_heap::kill_gc_thread()
27306 //assert (settings.concurrent == FALSE);
27308 // We are doing a two-stage shutdown now.
27309 // In the first stage, we do minimum work, and call ExitProcess at the end.
27310 // In the second stage, we have the Loader lock and only one thread is
27311 // alive. Hence we do not need to kill gc thread.
27312 background_gc_done_event.CloseEvent();
27313 gc_lh_block_event.CloseEvent();
27314 bgc_start_event.CloseEvent();
27315 bgc_threads_timeout_cs.Destroy();
27317 recursive_gc_sync::shutdown();
27320 void gc_heap::bgc_thread_function()
27322 assert (background_gc_done_event.IsValid());
27323 assert (bgc_start_event.IsValid());
27325 dprintf (3, ("gc_thread thread starting..."));
27327 BOOL do_exit = FALSE;
27329 bool cooperative_mode = true;
27330 bgc_thread_id.SetToCurrentThread();
27331 dprintf (1, ("bgc_thread_id is set to %x", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()));
27334 // Wait for work to do...
27335 dprintf (3, ("bgc thread: waiting..."));
27337 cooperative_mode = enable_preemptive ();
27338 //current_thread->m_fPreemptiveGCDisabled = 0;
27340 uint32_t result = bgc_start_event.Wait(
27342 #ifdef MULTIPLE_HEAPS
27346 #endif //MULTIPLE_HEAPS
27348 #ifdef MULTIPLE_HEAPS
27352 #endif //MULTIPLE_HEAPS
27355 dprintf (2, ("gc thread: finished waiting"));
27357 // not calling disable_preemptive here 'cause we
27358 // can't wait for GC complete here - RestartEE will be called
27359 // when we've done the init work.
27361 if (result == WAIT_TIMEOUT)
27363 // Should join the bgc threads and terminate all of them
27365 dprintf (1, ("GC thread timeout"));
27366 bgc_threads_timeout_cs.Enter();
27367 if (!keep_bgc_threads_p)
27369 dprintf (2, ("GC thread exiting"));
27370 bgc_thread_running = FALSE;
27372 bgc_thread_id.Clear();
27375 bgc_threads_timeout_cs.Leave();
27380 dprintf (3, ("GC thread needed, not exiting"));
27384 // if we signal the thread with no concurrent work to do -> exit
27385 if (!settings.concurrent)
27387 dprintf (3, ("no concurrent GC needed, exiting"));
27393 recursive_gc_sync::begin_background();
27394 dprintf (2, ("beginning of bgc: gen2 FL: %d, FO: %d, frag: %d",
27395 generation_free_list_space (generation_of (max_generation)),
27396 generation_free_obj_space (generation_of (max_generation)),
27397 dd_fragmentation (dynamic_data_of (max_generation))));
27401 current_bgc_state = bgc_not_in_process;
27404 //trace_gc = FALSE;
27407 enable_preemptive ();
27408 #ifdef MULTIPLE_HEAPS
27409 bgc_t_join.join(this, gc_join_done);
27410 if (bgc_t_join.joined())
27411 #endif //MULTIPLE_HEAPS
27413 enter_spin_lock (&gc_lock);
27414 dprintf (SPINLOCK_LOG, ("bgc Egc"));
27416 bgc_start_event.Reset();
27418 #ifdef MULTIPLE_HEAPS
27419 for (int gen = max_generation; gen <= (max_generation + 1); gen++)
27421 size_t desired_per_heap = 0;
27422 size_t total_desired = 0;
27425 for (int i = 0; i < n_heaps; i++)
27428 dd = hp->dynamic_data_of (gen);
27429 size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
27430 if (temp_total_desired < total_desired)
27433 total_desired = (size_t)MAX_PTR;
27436 total_desired = temp_total_desired;
27439 desired_per_heap = Align ((total_desired/n_heaps), get_alignment_constant (FALSE));
27441 for (int i = 0; i < n_heaps; i++)
27443 hp = gc_heap::g_heaps[i];
27444 dd = hp->dynamic_data_of (gen);
27445 dd_desired_allocation (dd) = desired_per_heap;
27446 dd_gc_new_allocation (dd) = desired_per_heap;
27447 dd_new_allocation (dd) = desired_per_heap;
27450 #endif //MULTIPLE_HEAPS
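// The loop above averages the gen2 and gen3 budgets across heaps with a
// saturating sum, so an overflowing total can't wrap around to a tiny value.
// A minimal sketch of that pattern follows; sum_desired_saturating is a
// hypothetical helper for illustration, not a gc.cpp API.
static size_t sum_desired_saturating (const size_t* values, int count)
{
    size_t total = 0;
    for (int i = 0; i < count; i++)
    {
        size_t temp = total + values[i];
        if (temp < total)
        {
            // overflow detected: clamp, mirroring the (size_t)MAX_PTR case above
            return (size_t)MAX_PTR;
        }
        total = temp;
    }
    return total;
}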
27451 #ifdef MULTIPLE_HEAPS
27453 #endif //MULTIPLE_HEAPS
27455 c_write (settings.concurrent, FALSE);
27456 recursive_gc_sync::end_background();
27457 keep_bgc_threads_p = FALSE;
27458 background_gc_done_event.Set();
27460 dprintf (SPINLOCK_LOG, ("bgc Lgc"));
27461 leave_spin_lock (&gc_lock);
27462 #ifdef MULTIPLE_HEAPS
27463 dprintf(1, ("End of BGC - starting all BGC threads"));
27464 bgc_t_join.restart();
27465 #endif //MULTIPLE_HEAPS
27467 // We can't disable preemptive mode here because a GC might already have
27468 // started, decided to do a BGC, and be waiting for a BGC thread to restart
27469 // the VM. That GC would be waiting in wait_to_proceed while we wait for it
27470 // to restart the VM, so we would deadlock.
27471 //gc_heap::disable_preemptive (true);
27474 FIRE_EVENT(GCTerminateConcurrentThread_V1);
27476 dprintf (3, ("bgc_thread thread exiting"));
27480 #endif //BACKGROUND_GC
27482 //Clear the cards [start_card, end_card[
27483 void gc_heap::clear_cards (size_t start_card, size_t end_card)
27485 if (start_card < end_card)
27487 size_t start_word = card_word (start_card);
27488 size_t end_word = card_word (end_card);
27489 if (start_word < end_word)
27491 // Figure out the bit positions of the cards within their words
27492 unsigned bits = card_bit (start_card);
27493 card_table [start_word] &= lowbits (~0, bits);
27494 for (size_t i = start_word+1; i < end_word; i++)
27495 card_table [i] = 0;
27496 bits = card_bit (end_card);
27497 // Don't write beyond end_card (and possibly uncommitted card table space).
27500 card_table [end_word] &= highbits (~0, bits);
27505 // If the start and end cards are in the same word, just clear the appropriate card
27506 // bits in that word.
27507 card_table [start_word] &= (lowbits (~0, card_bit (start_card)) |
27508 highbits (~0, card_bit (end_card)));
27510 #ifdef VERYSLOWDEBUG
27511 size_t card = start_card;
27512 while (card < end_card)
27514 assert (! (card_set_p (card)));
27517 #endif //VERYSLOWDEBUG
27518 dprintf (3,("Cleared cards [%Ix:%Ix, %Ix:%Ix[",
27519 start_card, (size_t)card_address (start_card),
27520 end_card, (size_t)card_address (end_card)));
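// A self-contained sketch of the single-word masking above, assuming the
// usual lowbits/highbits semantics (lowbits keeps bits below the given
// position, highbits keeps bits at or above it). clear_card_bits_in_word is
// a hypothetical illustration, not a gc.cpp helper.
static uint32_t clear_card_bits_in_word (uint32_t word, unsigned from_bit, unsigned to_bit)
{
    // keep cards below from_bit and at/above to_bit; clear [from_bit, to_bit[
    uint32_t keep_low  = (from_bit == 0) ? 0u : ((1u << from_bit) - 1);
    uint32_t keep_high = (to_bit >= 32) ? 0u : ~((1u << to_bit) - 1);
    return word & (keep_low | keep_high);
}
// Example: clearing cards [5, 9[ within one 32-card word ANDs with
// 0x0000001F | 0xFFFFFE00 = 0xFFFFFE1F, zeroing exactly bits 5..8.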
27524 void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address)
27526 size_t start_card = card_of (align_on_card (start_address));
27527 size_t end_card = card_of (align_lower_card (end_address));
27528 clear_cards (start_card, end_card);
27531 // copy [srccard, ...[ to [dst_card, end_card[
27532 // This will set the same bit twice. Can be optimized.
27534 void gc_heap::copy_cards (size_t dst_card,
27539 // If the range is empty, this function is a no-op - with the subtlety that
27540 // either of the accesses card_table[srcwrd] or card_table[dstwrd] could be
27541 // outside the committed region. To avoid the access, leave early.
27542 if (!(dst_card < end_card))
27545 unsigned int srcbit = card_bit (src_card);
27546 unsigned int dstbit = card_bit (dst_card);
27547 size_t srcwrd = card_word (src_card);
27548 size_t dstwrd = card_word (dst_card);
27549 unsigned int srctmp = card_table[srcwrd];
27550 unsigned int dsttmp = card_table[dstwrd];
27552 for (size_t card = dst_card; card < end_card; card++)
27554 if (srctmp & (1 << srcbit))
27555 dsttmp |= 1 << dstbit;
27557 dsttmp &= ~(1 << dstbit);
27558 if (!(++srcbit % 32))
27560 srctmp = card_table[++srcwrd];
27566 if (srctmp & (1 << srcbit))
27567 dsttmp |= 1 << dstbit;
27570 if (!(++dstbit % 32))
27572 card_table[dstwrd] = dsttmp;
27574 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27577 card_bundle_set(cardw_card_bundle(dstwrd));
27582 dsttmp = card_table[dstwrd];
27587 card_table[dstwrd] = dsttmp;
27589 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27592 card_bundle_set(cardw_card_bundle(dstwrd));
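// The word-at-a-time loop above amortizes card-table reads and writes over
// 32 cards. Functionally it matches this naive bit-range copy over a raw
// bitmap; copy_bit_range is a hypothetical stand-in written against plain
// arrays, not the card-table accessors used in gc.cpp.
static void copy_bit_range (uint32_t* table, size_t dst_bit, size_t src_bit, size_t end_bit)
{
    for (; dst_bit < end_bit; dst_bit++, src_bit++)
    {
        uint32_t src_set  = (table[src_bit / 32] >> (src_bit % 32)) & 1;
        uint32_t dst_mask = 1u << (dst_bit % 32);
        if (src_set)
            table[dst_bit / 32] |= dst_mask;
        else
            table[dst_bit / 32] &= ~dst_mask;
    }
}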
27597 void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
27599 ptrdiff_t relocation_distance = src - dest;
27600 size_t start_dest_card = card_of (align_on_card (dest));
27601 size_t end_dest_card = card_of (dest + len - 1);
27602 size_t dest_card = start_dest_card;
27603 size_t src_card = card_of (card_address (dest_card)+relocation_distance);
27604 dprintf (3,("Copying cards [%Ix:%Ix->%Ix:%Ix, ",
27605 src_card, (size_t)src, dest_card, (size_t)dest));
27606 dprintf (3,(" %Ix->%Ix:%Ix[",
27607 (size_t)src+len, end_dest_card, (size_t)dest+len));
27609 dprintf (3, ("dest: %Ix, src: %Ix, len: %Ix, reloc: %Ix, align_on_card(dest) is %Ix",
27610 dest, src, len, relocation_distance, (align_on_card (dest))));
27612 dprintf (3, ("start_dest_card: %Ix (address: %Ix), end_dest_card: %Ix(addr: %Ix), card_of (dest): %Ix",
27613 start_dest_card, card_address (start_dest_card), end_dest_card, card_address (end_dest_card), card_of (dest)));
27615 //First card has two boundaries
27616 if (start_dest_card != card_of (dest))
27618 if ((card_of (card_address (start_dest_card) + relocation_distance) <= card_of (src + len - 1))&&
27619 card_set_p (card_of (card_address (start_dest_card) + relocation_distance)))
27621 dprintf (3, ("card_address (start_dest_card) + reloc is %Ix, card: %Ix(set), src+len-1: %Ix, card: %Ix",
27622 (card_address (start_dest_card) + relocation_distance),
27623 card_of (card_address (start_dest_card) + relocation_distance),
27625 card_of (src + len - 1)));
27627 dprintf (3, ("setting card: %Ix", card_of (dest)));
27628 set_card (card_of (dest));
27632 if (card_set_p (card_of (src)))
27633 set_card (card_of (dest));
27636 copy_cards (dest_card, src_card, end_dest_card,
27637 ((dest - align_lower_card (dest)) != (src - align_lower_card (src))));
27639 //Last card has two boundaries.
27640 if ((card_of (card_address (end_dest_card) + relocation_distance) >= card_of (src)) &&
27641 card_set_p (card_of (card_address (end_dest_card) + relocation_distance)))
27643 dprintf (3, ("card_address (end_dest_card) + reloc is %Ix, card: %Ix(set), src: %Ix, card: %Ix",
27644 (card_address (end_dest_card) + relocation_distance),
27645 card_of (card_address (end_dest_card) + relocation_distance),
27649 dprintf (3, ("setting card: %Ix", end_dest_card));
27650 set_card (end_dest_card);
27653 if (card_set_p (card_of (src + len - 1)))
27654 set_card (end_dest_card);
27656 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
27657 card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card))));
27661 #ifdef BACKGROUND_GC
27662 // this does not need the Interlocked version of mark_array_set_marked.
27663 void gc_heap::copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
27665 dprintf (3, ("Copying mark_bits for addresses [%Ix->%Ix, %Ix->%Ix[",
27666 (size_t)src, (size_t)dest,
27667 (size_t)src+len, (size_t)dest+len));
27669 uint8_t* src_o = src;
27671 uint8_t* src_end = src + len;
27672 int align_const = get_alignment_constant (TRUE);
27673 ptrdiff_t reloc = dest - src;
27675 while (src_o < src_end)
27677 uint8_t* next_o = src_o + Align (size (src_o), align_const);
27679 if (background_object_marked (src_o, TRUE))
27681 dest_o = src_o + reloc;
27683 //if (background_object_marked (dest_o, FALSE))
27685 // dprintf (3, ("*%Ix shouldn't have already been marked!", (size_t)(dest_o)));
27686 // FATAL_GC_ERROR();
27689 background_mark (dest_o,
27690 background_saved_lowest_address,
27691 background_saved_highest_address);
27692 dprintf (3, ("bc*%Ix*bc, b*%Ix*b", (size_t)src_o, (size_t)(dest_o)));
27698 #endif //BACKGROUND_GC
27700 void gc_heap::fix_brick_to_highest (uint8_t* o, uint8_t* next_o)
27702 size_t new_current_brick = brick_of (o);
27703 set_brick (new_current_brick,
27704 (o - brick_address (new_current_brick)));
27705 size_t b = 1 + new_current_brick;
27706 size_t limit = brick_of (next_o);
27707 //dprintf(3,(" fixing brick %Ix to point to object %Ix, till %Ix(%Ix)",
27708 dprintf(3,("b:%Ix->%Ix-%Ix",
27709 new_current_brick, (size_t)o, (size_t)next_o));
27712 set_brick (b,(new_current_brick - b));
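// Sketch of how the brick entries written above are consumed (compare
// find_first_object below): a positive entry e means "the highest object in
// this brick starts at brick_address(b) + e - 1"; a negative entry chains
// back toward the brick that holds that object. find_object_brick is an
// illustrative helper over a plain array, not a gc.cpp API.
static ptrdiff_t find_object_brick (const short* bricks, ptrdiff_t b)
{
    short e;
    while ((e = bricks[b]) < 0)
    {
        b += e;    // negative entries are relative back-pointers
    }
    // bricks[b] is now a positive offset (entries of 0 mean "unknown" and
    // are asserted against in the real lookup)
    return b;
}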
27717 // start cannot be >= heap_segment_allocated for the segment.
27718 uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object)
27720 size_t brick = brick_of (start);
27722 //last_object == null -> no search shortcut needed
27723 if ((brick == brick_of (first_object) || (start <= first_object)))
27729 ptrdiff_t min_brick = (ptrdiff_t)brick_of (first_object);
27730 ptrdiff_t prev_brick = (ptrdiff_t)brick - 1;
27731 int brick_entry = 0;
27734 if (prev_brick < min_brick)
27738 if ((brick_entry = get_brick_entry(prev_brick)) >= 0)
27742 assert (brick_entry != 0);
27743 prev_brick = (brick_entry + prev_brick);
27746 o = ((prev_brick < min_brick) ? first_object :
27747 brick_address (prev_brick) + brick_entry - 1);
27748 assert (o <= start);
27751 assert (Align (size (o)) >= Align (min_obj_size));
27752 uint8_t* next_o = o + Align (size (o));
27753 size_t curr_cl = (size_t)next_o / brick_size;
27754 size_t min_cl = (size_t)first_object / brick_size;
27756 //dprintf (3,( "Looking for intersection with %Ix from %Ix", (size_t)start, (size_t)o));
27758 unsigned int n_o = 1;
27761 uint8_t* next_b = min (align_lower_brick (next_o) + brick_size, start+1);
27763 while (next_o <= start)
27771 assert (Align (size (o)) >= Align (min_obj_size));
27772 next_o = o + Align (size (o));
27774 }while (next_o < next_b);
27776 if (((size_t)next_o / brick_size) != curr_cl)
27778 if (curr_cl >= min_cl)
27780 fix_brick_to_highest (o, next_o);
27782 curr_cl = (size_t) next_o / brick_size;
27784 next_b = min (align_lower_brick (next_o) + brick_size, start+1);
27787 size_t bo = brick_of (o);
27788 //dprintf (3, ("Looked at %Id objects, fixing brick [%Ix-[%Ix",
27789 dprintf (3, ("%Id o, [%Ix-[%Ix",
27793 set_brick (bo, (o - brick_address(bo)));
27808 // Find the first non-zero card word between cardw and cardw_end.
27809 // The index of the word we find is returned in cardw.
27810 BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
27812 dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix",
27813 dd_collection_count (dynamic_data_of (0)), cardw, cardw_end));
27815 if (card_bundles_enabled())
27817 size_t cardb = cardw_card_bundle (cardw);
27818 size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end));
27821 // Find a non-zero bundle
27822 while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0))
27826 if (cardb == end_cardb)
27829 uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)];
27830 uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)];
27831 while ((card_word < card_word_end) && !(*card_word))
27836 if (card_word != card_word_end)
27838 cardw = (card_word - &card_table[0]);
27841 else if ((cardw <= card_bundle_cardw (cardb)) &&
27842 (card_word == &card_table [card_bundle_cardw (cardb+1)]))
27844 // a whole bundle was explored and is empty
27845 dprintf (3, ("gc: %d, find_card_dword clear bundle: %Ix cardw:[%Ix,%Ix[",
27846 dd_collection_count (dynamic_data_of (0)),
27847 cardb, card_bundle_cardw (cardb),
27848 card_bundle_cardw (cardb+1)));
27849 card_bundle_clear (cardb);
27857 uint32_t* card_word = &card_table[cardw];
27858 uint32_t* card_word_end = &card_table [cardw_end];
27860 while (card_word < card_word_end)
27862 if ((*card_word) != 0)
27864 cardw = (card_word - &card_table [0]);
27876 #endif //CARD_BUNDLE
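// A self-contained sketch of the bundle shortcut above: one bundle bit
// summarizes a fixed run of card words, so a clear bit lets the scan skip
// that whole run without reading it. The names and the words_per_bundle
// parameter are illustrative assumptions, not the gc.cpp mapping.
static bool find_nonzero_word (const uint32_t* words, const uint32_t* bundles,
                               size_t words_per_bundle, size_t& w, size_t w_end)
{
    while (w < w_end)
    {
        size_t b = w / words_per_bundle;
        if (!((bundles[b / 32] >> (b % 32)) & 1))
        {
            w = (b + 1) * words_per_bundle;   // bundle clear: skip all its words
            continue;
        }
        size_t bundle_end = (b + 1) * words_per_bundle;
        if (bundle_end > w_end)
            bundle_end = w_end;
        for (; w < bundle_end; w++)
        {
            if (words[w] != 0)
                return true;                  // w now indexes a non-zero word
        }
        // all covered words were zero; the real code clears the bundle bit
        // (card_bundle_clear) when it has examined the bundle's full range,
        // so the next scan skips it outright
    }
    return false;
}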
27878 // Find cards that are set between two points in a card table.
27880 // card_table : The card table.
27881 // card : [in/out] As input, the card to start searching from.
27882 // As output, the first card that's set.
27883 // card_word_end : The card word at which to stop looking.
27885 // end_card : [out] One past the last set card of the run, so [card, end_card[ is set.
27885 BOOL gc_heap::find_card(uint32_t* card_table,
27887 size_t card_word_end,
27890 uint32_t* last_card_word;
27891 uint32_t card_word_value;
27892 uint32_t bit_position;
27894 // Find the first card which is set
27895 last_card_word = &card_table [card_word (card)];
27896 bit_position = card_bit (card);
27897 card_word_value = (*last_card_word) >> bit_position;
27898 if (!card_word_value)
27902 // Using the card bundle, go through the remaining card words between here and
27903 // card_word_end until we find one that is non-zero.
27904 size_t lcw = card_word(card) + 1;
27905 if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE)
27911 last_card_word = &card_table [lcw];
27912 card_word_value = *last_card_word;
27915 #else //CARD_BUNDLE
27916 // Go through the remaining card words between here and card_word_end until we find
27917 // one that is non-zero.
27923 while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word));
27924 if (last_card_word < &card_table [card_word_end])
27926 card_word_value = *last_card_word;
27930 // We failed to find any non-zero card words before we got to card_word_end
27933 #endif //CARD_BUNDLE
27937 // Look for the lowest bit set
27938 if (card_word_value)
27940 while (!(card_word_value & 1))
27943 card_word_value = card_word_value / 2;
27947 // card is the card word index * card size + the bit index within the card
27948 card = (last_card_word - &card_table[0]) * card_word_width + bit_position;
27952 // Keep going until we get to an un-set card.
27954 card_word_value = card_word_value / 2;
27956 // If we reach the end of the card word and haven't hit a 0 yet, start going
27957 // card word by card word until we get to one that's not fully set (0xFFFF...)
27958 // or we reach card_word_end.
27959 if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end]))
27963 card_word_value = *(++last_card_word);
27964 } while ((last_card_word < &card_table [card_word_end]) &&
27967 (card_word_value == (1 << card_word_width)-1)
27969 // if left shift count >= width of type,
27970 // gcc reports error.
27971 (card_word_value == ~0u)
27976 } while (card_word_value & 1);
27978 end_card = (last_card_word - &card_table [0])* card_word_width + bit_position;
27980 //dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card));
27981 dprintf (3, ("fc: [%Ix, %Ix[", card, end_card));
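// Sketch of the typical consumption pattern (compare the loop in
// mark_through_cards_for_segments below): repeatedly fetch the next set run
// [card, end_card[ and scan the objects it covers. Simplified for
// illustration; the real loop interleaves card clearing and object walking.
//
//   size_t card = card_of (beg);
//   size_t end_card = 0;
//   while (find_card (card_table, card, card_word_end, end_card))
//   {
//       // objects intersecting [card_address (card), card_address (end_card)[
//       // may contain cross-generation pointers and must be examined
//       card = end_card;
//   }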
27986 //because of heap expansion, computing end is complicated.
27987 uint8_t* compute_next_end (heap_segment* seg, uint8_t* low)
27989 if ((low >= heap_segment_mem (seg)) &&
27990 (low < heap_segment_allocated (seg)))
27993 return heap_segment_allocated (seg);
27997 gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
28000 UNREFERENCED_PARAMETER(low);
28002 //when relocating, the fault line is the plan start of the younger
28003 //generation because the generation is promoted.
28004 if (relocating && (gen_number == (settings.condemned_generation + 1)))
28006 generation* gen = generation_of (gen_number - 1);
28007 uint8_t* gen_alloc = generation_plan_allocation_start (gen);
28008 assert (gen_alloc);
28013 assert (gen_number > settings.condemned_generation);
28014 return generation_allocation_start (generation_of (gen_number - 1 ));
28020 gc_heap::keep_card_live (uint8_t* o, size_t& n_gen,
28021 size_t& cg_pointers_found)
28024 if ((gc_low <= o) && (gc_high > o))
28028 #ifdef MULTIPLE_HEAPS
28031 gc_heap* hp = heap_of (o);
28034 if ((hp->gc_low <= o) &&
28041 #endif //MULTIPLE_HEAPS
28042 cg_pointers_found ++;
28043 dprintf (4, ("keep card live for %Ix", o));
28047 gc_heap::mark_through_cards_helper (uint8_t** poo, size_t& n_gen,
28048 size_t& cg_pointers_found,
28049 card_fn fn, uint8_t* nhigh,
28050 uint8_t* next_boundary)
28053 if ((gc_low <= *poo) && (gc_high > *poo))
28056 call_fn(fn) (poo THREAD_NUMBER_ARG);
28058 #ifdef MULTIPLE_HEAPS
28061 gc_heap* hp = heap_of_gc (*poo);
28064 if ((hp->gc_low <= *poo) &&
28065 (hp->gc_high > *poo))
28068 call_fn(fn) (poo THREAD_NUMBER_ARG);
28070 if ((fn == &gc_heap::relocate_address) ||
28071 ((hp->ephemeral_low <= *poo) &&
28072 (hp->ephemeral_high > *poo)))
28074 cg_pointers_found++;
28078 #endif //MULTIPLE_HEAPS
28079 if ((next_boundary <= *poo) && (nhigh > *poo))
28081 cg_pointers_found ++;
28082 dprintf (4, ("cg pointer %Ix found, %Id so far",
28083 (size_t)*poo, cg_pointers_found ));
28088 BOOL gc_heap::card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
28089 size_t& cg_pointers_found,
28090 size_t& n_eph, size_t& n_card_set,
28091 size_t& card, size_t& end_card,
28092 BOOL& foundp, uint8_t*& start_address,
28093 uint8_t*& limit, size_t& n_cards_cleared)
28095 dprintf (3, ("pointer %Ix past card %Ix", (size_t)po, (size_t)card));
28096 dprintf (3, ("ct: %Id cg", cg_pointers_found));
28097 BOOL passed_end_card_p = FALSE;
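// No interesting pointers were found in the cards since the last
// transition: clear them so the next GC doesn't re-scan this range.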
28100 if (cg_pointers_found == 0)
28102 //dprintf(3,(" Clearing cards [%Ix, %Ix[ ",
28103 dprintf(3,(" CC [%Ix, %Ix[ ",
28104 (size_t)card_address(card), (size_t)po));
28105 clear_cards (card, card_of(po));
28106 n_card_set -= (card_of (po) - card);
28107 n_cards_cleared += (card_of (po) - card);
28110 n_eph +=cg_pointers_found;
28111 cg_pointers_found = 0;
28112 card = card_of (po);
28113 if (card >= end_card)
28115 passed_end_card_p = TRUE;
28116 dprintf (3, ("card %Ix exceeding end_card %Ix",
28117 (size_t)card, (size_t)end_card));
28118 foundp = find_card (card_table, card, card_word_end, end_card);
28121 n_card_set+= end_card - card;
28122 start_address = card_address (card);
28123 dprintf (3, ("NewC: %Ix, start: %Ix, end: %Ix",
28124 (size_t)card, (size_t)start_address,
28125 (size_t)card_address (end_card)));
28127 limit = min (end, card_address (end_card));
28129 assert (!((limit == card_address (end_card))&&
28130 card_set_p (end_card)));
28133 return passed_end_card_p;
28136 void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating)
28138 #ifdef BACKGROUND_GC
28139 dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)",
28140 current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start));
28142 heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
28143 PREFIX_ASSUME(soh_seg != NULL);
28147 dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix",
28149 heap_segment_background_allocated (soh_seg),
28150 heap_segment_allocated (soh_seg)));
28152 soh_seg = heap_segment_next_rw (soh_seg);
28154 #endif //BACKGROUND_GC
28156 uint8_t* low = gc_low;
28157 uint8_t* high = gc_high;
28158 size_t end_card = 0;
28160 generation* oldest_gen = generation_of (max_generation);
28161 int curr_gen_number = max_generation;
28162 uint8_t* gen_boundary = generation_allocation_start(generation_of(curr_gen_number - 1));
28163 uint8_t* next_boundary = compute_next_boundary(gc_low, curr_gen_number, relocating);
28165 heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen));
28166 PREFIX_ASSUME(seg != NULL);
28168 uint8_t* beg = generation_allocation_start (oldest_gen);
28169 uint8_t* end = compute_next_end (seg, low);
28170 uint8_t* last_object = beg;
28172 size_t cg_pointers_found = 0;
28174 size_t card_word_end = (card_of (align_on_card_word (end)) / card_word_width);
28178 size_t n_card_set = 0;
28179 uint8_t* nhigh = (relocating ?
28180 heap_segment_plan_allocated (ephemeral_heap_segment) : high);
28182 BOOL foundp = FALSE;
28183 uint8_t* start_address = 0;
28184 uint8_t* limit = 0;
28185 size_t card = card_of (beg);
28186 #ifdef BACKGROUND_GC
28187 BOOL consider_bgc_mark_p = FALSE;
28188 BOOL check_current_sweep_p = FALSE;
28189 BOOL check_saved_sweep_p = FALSE;
28190 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
28191 #endif //BACKGROUND_GC
28193 dprintf(3, ("CMs: %Ix->%Ix", (size_t)beg, (size_t)end));
28194 size_t total_cards_cleared = 0;
28198 if (card_of(last_object) > card)
28200 dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
28201 if (cg_pointers_found == 0)
28203 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object));
28204 clear_cards (card, card_of(last_object));
28205 n_card_set -= (card_of (last_object) - card);
28206 total_cards_cleared += (card_of (last_object) - card);
28209 n_eph += cg_pointers_found;
28210 cg_pointers_found = 0;
28211 card = card_of (last_object);
28214 if (card >= end_card)
28216 foundp = find_card (card_table, card, card_word_end, end_card);
28219 n_card_set += end_card - card;
28220 start_address = max (beg, card_address (card));
28222 limit = min (end, card_address (end_card));
28224 if (!foundp || (last_object >= end) || (card_address (card) >= end))
28226 if (foundp && (cg_pointers_found == 0))
28228 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
28230 clear_cards (card, card_of (end));
28231 n_card_set -= (card_of (end) - card);
28232 total_cards_cleared += (card_of (end) - card);
28234 n_eph += cg_pointers_found;
28235 cg_pointers_found = 0;
28236 if ((seg = heap_segment_next_in_range (seg)) != 0)
28238 #ifdef BACKGROUND_GC
28239 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
28240 #endif //BACKGROUND_GC
28241 beg = heap_segment_mem (seg);
28242 end = compute_next_end (seg, low);
28243 card_word_end = card_of (align_on_card_word (end)) / card_word_width;
28244 card = card_of (beg);
28255 assert (card_set_p (card));
28257 uint8_t* o = last_object;
28259 o = find_first_object (start_address, last_object);
28260 // Never visit an object twice.
28261 assert (o >= last_object);
28263 //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix",
28264 dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix",
28265 card, (size_t)o, (size_t)limit, (size_t)gen_boundary));
28269 assert (Align (size (o)) >= Align (min_obj_size));
28270 size_t s = size (o);
28272 uint8_t* next_o = o + Align (s);
28275 if ((o >= gen_boundary) &&
28276 (seg == ephemeral_heap_segment))
28278 dprintf (3, ("switching gen boundary %Ix", (size_t)gen_boundary));
28280 assert ((curr_gen_number > 0));
28281 gen_boundary = generation_allocation_start
28282 (generation_of (curr_gen_number - 1));
28283 next_boundary = (compute_next_boundary
28284 (low, curr_gen_number, relocating));
28287 dprintf (4, ("|%Ix|", (size_t)o));
28289 if (next_o < start_address)
28294 #ifdef BACKGROUND_GC
28295 if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
28299 #endif //BACKGROUND_GC
28301 #ifdef COLLECTIBLE_CLASS
28302 if (is_collectible(o))
28304 BOOL passed_end_card_p = FALSE;
28306 if (card_of (o) > card)
28308 passed_end_card_p = card_transition (o, end, card_word_end,
28312 foundp, start_address,
28313 limit, total_cards_cleared);
28316 if ((!passed_end_card_p || foundp) && (card_of (o) == card))
28318 // card is valid and it covers the head of the object
28319 if (fn == &gc_heap::relocate_address)
28321 keep_card_live (o, n_gen, cg_pointers_found);
28325 uint8_t* class_obj = get_class_object (o);
28326 mark_through_cards_helper (&class_obj, n_gen,
28327 cg_pointers_found, fn,
28328 nhigh, next_boundary);
28332 if (passed_end_card_p)
28334 if (foundp && (card_address (card) < next_o))
28336 goto go_through_refs;
28338 else if (foundp && (start_address < limit))
28340 next_o = find_first_object (start_address, o);
28349 #endif //COLLECTIBLE_CLASS
28351 if (contain_pointers (o))
28353 dprintf(3,("Going through %Ix start_address: %Ix", (size_t)o, (size_t)start_address));
28356 dprintf (4, ("normal object path"));
28358 (method_table(o), o, s, poo,
28359 start_address, use_start, (o + s),
28361 dprintf (4, ("<%Ix>:%Ix", (size_t)poo, (size_t)*poo));
28362 if (card_of ((uint8_t*)poo) > card)
28364 BOOL passed_end_card_p = card_transition ((uint8_t*)poo, end,
28369 foundp, start_address,
28370 limit, total_cards_cleared);
28372 if (passed_end_card_p)
28374 if (foundp && (card_address (card) < next_o))
28378 if (ppstop <= (uint8_t**)start_address)
28380 else if (poo < (uint8_t**)start_address)
28381 {poo = (uint8_t**)start_address;}
28384 else if (foundp && (start_address < limit))
28386 next_o = find_first_object (start_address, o);
28394 mark_through_cards_helper (poo, n_gen,
28395 cg_pointers_found, fn,
28396 nhigh, next_boundary);
28403 if (((size_t)next_o / brick_size) != ((size_t) o / brick_size))
28405 if (brick_table [brick_of (o)] <0)
28406 fix_brick_to_highest (o, next_o);
28414 // compute the efficiency ratio of the card table
28417 generation_skip_ratio = ((n_eph > 400)? (int)(((float)n_gen / (float)n_eph) * 100) : 100);
28418 dprintf (3, ("Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d",
28419 n_eph, n_gen , n_card_set, total_cards_cleared, generation_skip_ratio));
28423 dprintf (3, ("R: Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d",
28424 n_gen, n_eph, n_card_set, total_cards_cleared, generation_skip_ratio));
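// Worked example of the ratio above: visiting n_eph = 1000 cross-generation
// pointers of which n_gen = 50 were actually interesting gives a
// generation_skip_ratio of 50 * 100 / 1000 = 5, i.e. card scanning was 5%
// useful on this heap. Samples with n_eph <= 400 score 100 so that small,
// noisy runs can't depress the ratio.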
28428 #ifdef SEG_REUSE_STATS
28429 size_t gc_heap::dump_buckets (size_t* ordered_indices, int count, size_t* total_size)
28431 size_t total_items = 0;
28433 for (int i = 0; i < count; i++)
28435 total_items += ordered_indices[i];
28436 *total_size += ordered_indices[i] << (MIN_INDEX_POWER2 + i);
28437 dprintf (SEG_REUSE_LOG_0, ("[%d]%4d 2^%2d", heap_number, ordered_indices[i], (MIN_INDEX_POWER2 + i)));
28439 dprintf (SEG_REUSE_LOG_0, ("[%d]Total %d items, total size is 0x%Ix", heap_number, total_items, *total_size));
28440 return total_items;
28442 #endif // SEG_REUSE_STATS
28444 void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug)
28446 // detect pinned plugs
28447 if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
28449 deque_pinned_plug();
28450 update_oldest_pinned_plug();
28451 dprintf (3, ("dequeued pin, now oldest pin is %Ix", pinned_plug (oldest_pin())));
28455 size_t plug_size = last_plug_size + Align(min_obj_size);
28456 BOOL is_padded = FALSE;
28459 plug_size += Align (min_obj_size);
28461 #endif //SHORT_PLUGS
28463 #ifdef RESPECT_LARGE_ALIGNMENT
28464 plug_size += switch_alignment_size (is_padded);
28465 #endif //RESPECT_LARGE_ALIGNMENT
28467 total_ephemeral_plugs += plug_size;
28468 size_t plug_size_power2 = round_up_power2 (plug_size);
28469 ordered_plug_indices[relative_index_power2_plug (plug_size_power2)]++;
28470 dprintf (SEG_REUSE_LOG_1, ("[%d]count_plug: adding 0x%Ix - %Id (2^%d) to ordered plug array",
28474 (relative_index_power2_plug (plug_size_power2) + MIN_INDEX_POWER2)));
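// Minimal sketch of the bucketing above, assuming round_up_power2 rounds to
// the next power of two and relative_index_power2_plug subtracts
// MIN_INDEX_POWER2 from its log2. round_up_power2_sketch is an illustrative
// stand-in, not the gc.cpp implementation.
static size_t round_up_power2_sketch (size_t size)
{
    size_t p = 1;
    while (p < size)
        p <<= 1;
    return p;    // e.g. a 0x520-byte plug rounds to 0x800 == 2^11
}
// That plug is then counted in bucket (11 - MIN_INDEX_POWER2) of
// ordered_plug_indices.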
28478 void gc_heap::count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug)
28480 assert ((tree != NULL));
28481 if (node_left_child (tree))
28483 count_plugs_in_brick (tree + node_left_child (tree), last_plug);
28486 if (last_plug != 0)
28488 uint8_t* plug = tree;
28489 size_t gap_size = node_gap_size (plug);
28490 uint8_t* gap = (plug - gap_size);
28491 uint8_t* last_plug_end = gap;
28492 size_t last_plug_size = (last_plug_end - last_plug);
28493 dprintf (3, ("tree: %Ix, last plug: %Ix, gap size: %Ix, gap: %Ix, last plug size: %Ix",
28494 tree, last_plug, gap_size, gap, last_plug_size));
28496 if (tree == oldest_pinned_plug)
28498 dprintf (3, ("tree %Ix is pinned, last plug is %Ix, size is %Ix",
28499 tree, last_plug, last_plug_size));
28500 mark* m = oldest_pin();
28501 if (m->has_pre_plug_info())
28503 last_plug_size += sizeof (gap_reloc_pair);
28504 dprintf (3, ("pin %Ix has pre plug, adjusting plug size to %Ix", tree, last_plug_size));
28507 // Can't assert here - if it's a pinned plug it can be less.
28508 //assert (last_plug_size >= Align (min_obj_size));
28510 count_plug (last_plug_size, last_plug);
28515 if (node_right_child (tree))
28517 count_plugs_in_brick (tree + node_right_child (tree), last_plug);
28521 void gc_heap::build_ordered_plug_indices ()
28523 memset (ordered_plug_indices, 0, sizeof(ordered_plug_indices));
28524 memset (saved_ordered_plug_indices, 0, sizeof(saved_ordered_plug_indices));
28526 uint8_t* start_address = generation_limit (max_generation);
28527 uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment);
28528 size_t current_brick = brick_of (start_address);
28529 size_t end_brick = brick_of (end_address - 1);
28530 uint8_t* last_plug = 0;
28532 //Look for the right pinned plug to start from.
28533 reset_pinned_queue_bos();
28534 while (!pinned_plug_que_empty_p())
28536 mark* m = oldest_pin();
28537 if ((m->first >= start_address) && (m->first < end_address))
28539 dprintf (3, ("found a pin %Ix between %Ix and %Ix", m->first, start_address, end_address));
28544 deque_pinned_plug();
28547 update_oldest_pinned_plug();
28549 while (current_brick <= end_brick)
28551 int brick_entry = brick_table [ current_brick ];
28552 if (brick_entry >= 0)
28554 count_plugs_in_brick (brick_address (current_brick) + brick_entry -1, last_plug);
28562 count_plug (end_address - last_plug, last_plug);
28565 // we need to make sure that after fitting all the existing plugs, we
28566 // have big enough free space left to guarantee that the next allocation will succeed.
28568 size_t extra_size = END_SPACE_AFTER_GC + Align (min_obj_size);
28569 total_ephemeral_plugs += extra_size;
28570 dprintf (SEG_REUSE_LOG_0, ("Making sure we can fit a large object after fitting all plugs"));
28571 ordered_plug_indices[relative_index_power2_plug (round_up_power2 (extra_size))]++;
28573 memcpy (saved_ordered_plug_indices, ordered_plug_indices, sizeof(ordered_plug_indices));
28575 #ifdef SEG_REUSE_STATS
28576 dprintf (SEG_REUSE_LOG_0, ("Plugs:"));
28577 size_t total_plug_power2 = 0;
28578 dump_buckets (ordered_plug_indices, MAX_NUM_BUCKETS, &total_plug_power2);
28579 dprintf (SEG_REUSE_LOG_0, ("plugs: 0x%Ix (rounded up to 0x%Ix (%d%%))",
28580 total_ephemeral_plugs,
28582 (total_ephemeral_plugs ?
28583 (total_plug_power2 * 100 / total_ephemeral_plugs) :
28585 dprintf (SEG_REUSE_LOG_0, ("-------------------"));
28586 #endif // SEG_REUSE_STATS
28589 void gc_heap::init_ordered_free_space_indices ()
28591 memset (ordered_free_space_indices, 0, sizeof(ordered_free_space_indices));
28592 memset (saved_ordered_free_space_indices, 0, sizeof(saved_ordered_free_space_indices));
28595 void gc_heap::trim_free_spaces_indices ()
28597 trimmed_free_space_index = -1;
28598 size_t max_count = max_free_space_items - 1;
28601 for (i = (MAX_NUM_BUCKETS - 1); i >= 0; i--)
28603 count += ordered_free_space_indices[i];
28605 if (count >= max_count)
28611 ptrdiff_t extra_free_space_items = count - max_count;
28613 if (extra_free_space_items > 0)
28615 ordered_free_space_indices[i] -= extra_free_space_items;
28616 free_space_items = max_count;
28617 trimmed_free_space_index = i;
28621 free_space_items = count;
28629 free_space_buckets = MAX_NUM_BUCKETS - i;
28631 for (--i; i >= 0; i--)
28633 ordered_free_space_indices[i] = 0;
28636 memcpy (saved_ordered_free_space_indices,
28637 ordered_free_space_indices,
28638 sizeof(ordered_free_space_indices));
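// Worked example of the trim above: with max_free_space_items = 10 and the
// three largest buckets holding 3, 5 and 6 spaces, the scan stops once
// 3 + 5 + 6 = 14 >= max_count = 9; the overshoot of 14 - 9 = 5 is removed
// from the bucket where we stopped (6 -> 1), every smaller bucket is zeroed,
// and trimmed_free_space_index remembers where the cut happened.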
28641 // We fit as many plugs as we can and update the number of plugs left and the number
28642 // of free spaces left.
28643 BOOL gc_heap::can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index)
28645 assert (small_index <= big_index);
28646 assert (big_index < MAX_NUM_BUCKETS);
28648 size_t small_blocks = ordered_blocks[small_index];
28650 if (small_blocks == 0)
28655 size_t big_spaces = ordered_spaces[big_index];
28657 if (big_spaces == 0)
28662 dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting %Id 2^%d plugs into %Id 2^%d free spaces",
28664 small_blocks, (small_index + MIN_INDEX_POWER2),
28665 big_spaces, (big_index + MIN_INDEX_POWER2)));
28667 size_t big_to_small = big_spaces << (big_index - small_index);
28669 ptrdiff_t extra_small_spaces = big_to_small - small_blocks;
28670 dprintf (SEG_REUSE_LOG_1, ("[%d]%d 2^%d spaces can fit %d 2^%d blocks",
28672 big_spaces, (big_index + MIN_INDEX_POWER2), big_to_small, (small_index + MIN_INDEX_POWER2)));
28673 BOOL can_fit = (extra_small_spaces >= 0);
28677 dprintf (SEG_REUSE_LOG_1, ("[%d]Can fit with %d 2^%d extras blocks",
28679 extra_small_spaces, (small_index + MIN_INDEX_POWER2)));
28684 dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d spaces to 0", heap_number, (big_index + MIN_INDEX_POWER2)));
28685 ordered_spaces[big_index] = 0;
28686 if (extra_small_spaces > 0)
28688 dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d blocks to 0", heap_number, (small_index + MIN_INDEX_POWER2)));
28689 ordered_blocks[small_index] = 0;
28690 for (i = small_index; i < big_index; i++)
28692 if (extra_small_spaces & 1)
28694 dprintf (SEG_REUSE_LOG_1, ("[%d]Increasing # of 2^%d spaces from %d to %d",
28696 (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + 1)));
28697 ordered_spaces[i] += 1;
28699 extra_small_spaces >>= 1;
28702 dprintf (SEG_REUSE_LOG_1, ("[%d]Finally increasing # of 2^%d spaces from %d to %d",
28704 (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + extra_small_spaces)));
28705 ordered_spaces[i] += extra_small_spaces;
28709 dprintf (SEG_REUSE_LOG_1, ("[%d]Decreasing # of 2^%d blocks from %d to %d",
28711 (small_index + MIN_INDEX_POWER2),
28712 ordered_blocks[small_index],
28713 (ordered_blocks[small_index] - big_to_small)));
28714 ordered_blocks[small_index] -= big_to_small;
28717 #ifdef SEG_REUSE_STATS
28719 dprintf (SEG_REUSE_LOG_1, ("[%d]Plugs became:", heap_number));
28720 dump_buckets (ordered_blocks, MAX_NUM_BUCKETS, &temp);
28722 dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces became:", heap_number));
28723 dump_buckets (ordered_spaces, MAX_NUM_BUCKETS, &temp);
28724 #endif //SEG_REUSE_STATS
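// Worked example of the arithmetic above: fitting 5 blocks of 2^10 into 2
// spaces of 2^12 gives big_to_small = 2 << (12 - 10) = 8 slots, so
// extra_small_spaces = 8 - 5 = 3 and the fit succeeds. The leftover
// 3 * 2^10 bytes are returned by binary decomposition: bit 0 of 3 adds one
// 2^10 space and bit 1 adds one 2^11 space; anything still left after the
// loop is credited to the big_index bucket wholesale.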
28729 // space_index gets updated to the biggest available space index.
28730 BOOL gc_heap::can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index)
28732 assert (*space_index >= block_index);
28734 while (!can_fit_in_spaces_p (ordered_blocks, block_index, ordered_spaces, *space_index))
28737 if (*space_index < block_index)
28746 BOOL gc_heap::can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count)
28748 #ifdef FEATURE_STRUCTALIGN
28749 // BARTOKTODO (4841): reenable when can_fit_in_spaces_p takes alignment requirements into account
28751 #endif // FEATURE_STRUCTALIGN
28752 int space_index = count - 1;
28753 for (int block_index = (count - 1); block_index >= 0; block_index--)
28755 if (!can_fit_blocks_p (ordered_blocks, block_index, ordered_spaces, &space_index))
28764 void gc_heap::build_ordered_free_spaces (heap_segment* seg)
28766 assert (bestfit_seg);
28768 //bestfit_seg->add_buckets (MAX_NUM_BUCKETS - free_space_buckets + MIN_INDEX_POWER2,
28769 // ordered_free_space_indices + (MAX_NUM_BUCKETS - free_space_buckets),
28770 // free_space_buckets,
28771 // free_space_items);
28773 bestfit_seg->add_buckets (MIN_INDEX_POWER2,
28774 ordered_free_space_indices,
28778 assert (settings.condemned_generation == max_generation);
28780 uint8_t* first_address = heap_segment_mem (seg);
28781 uint8_t* end_address = heap_segment_reserved (seg);
28782 //look through the pinned plugs for relevant ones.
28783 //Look for the right pinned plug to start from.
28784 reset_pinned_queue_bos();
28786 // See comment in can_expand_into_p why we need (max_generation + 1).
28787 size_t eph_gen_starts = (Align (min_obj_size)) * (max_generation + 1);
28788 BOOL has_fit_gen_starts = FALSE;
28790 while (!pinned_plug_que_empty_p())
28793 if ((pinned_plug (m) >= first_address) &&
28794 (pinned_plug (m) < end_address) &&
28795 (pinned_len (m) >= eph_gen_starts))
28798 assert ((pinned_plug (m) - pinned_len (m)) == bestfit_first_pin);
28803 deque_pinned_plug();
28807 if (!pinned_plug_que_empty_p())
28809 bestfit_seg->add ((void*)m, TRUE, TRUE);
28810 deque_pinned_plug();
28812 has_fit_gen_starts = TRUE;
28815 while (!pinned_plug_que_empty_p() &&
28816 ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
28818 bestfit_seg->add ((void*)m, TRUE, FALSE);
28819 deque_pinned_plug();
28823 if (commit_end_of_seg)
28825 if (!has_fit_gen_starts)
28827 assert (bestfit_first_pin == heap_segment_plan_allocated (seg));
28829 bestfit_seg->add ((void*)seg, FALSE, (!has_fit_gen_starts));
28833 bestfit_seg->check();
28837 BOOL gc_heap::try_best_fit (BOOL end_of_segment_p)
28839 if (!end_of_segment_p)
28841 trim_free_spaces_indices ();
28844 BOOL can_bestfit = can_fit_all_blocks_p (ordered_plug_indices,
28845 ordered_free_space_indices,
28848 return can_bestfit;
28851 BOOL gc_heap::best_fit (size_t free_space,
28852 size_t largest_free_space,
28853 size_t additional_space,
28854 BOOL* use_additional_space)
28856 dprintf (SEG_REUSE_LOG_0, ("gen%d: trying best fit mechanism", settings.condemned_generation));
28858 assert (!additional_space || (additional_space && use_additional_space));
28859 if (use_additional_space)
28861 *use_additional_space = FALSE;
28864 if (ordered_plug_indices_init == FALSE)
28866 total_ephemeral_plugs = 0;
28867 build_ordered_plug_indices();
28868 ordered_plug_indices_init = TRUE;
28872 memcpy (ordered_plug_indices, saved_ordered_plug_indices, sizeof(ordered_plug_indices));
28875 if (total_ephemeral_plugs == (END_SPACE_AFTER_GC + Align (min_obj_size)))
28877 dprintf (SEG_REUSE_LOG_0, ("No ephemeral plugs to realloc, done"));
28878 size_t empty_eph = (END_SPACE_AFTER_GC + Align (min_obj_size) + (Align (min_obj_size)) * (max_generation + 1));
28879 BOOL can_fit_empty_eph = (largest_free_space >= empty_eph);
28880 if (!can_fit_empty_eph)
28882 can_fit_empty_eph = (additional_space >= empty_eph);
28884 if (can_fit_empty_eph)
28886 *use_additional_space = TRUE;
28890 return can_fit_empty_eph;
28893 if ((total_ephemeral_plugs + approximate_new_allocation()) >= (free_space + additional_space))
28895 dprintf (SEG_REUSE_LOG_0, ("We won't have enough free space left in this segment after fitting, done"));
28899 if ((free_space + additional_space) == 0)
28901 dprintf (SEG_REUSE_LOG_0, ("No free space in this segment, done"));
28905 #ifdef SEG_REUSE_STATS
28906 dprintf (SEG_REUSE_LOG_0, ("Free spaces:"));
28907 size_t total_free_space_power2 = 0;
28908 size_t total_free_space_items =
28909 dump_buckets (ordered_free_space_indices,
28911 &total_free_space_power2);
28912 dprintf (SEG_REUSE_LOG_0, ("currently max free spaces is %Id", max_free_space_items));
28914 dprintf (SEG_REUSE_LOG_0, ("Ephemeral plugs: 0x%Ix, free space: 0x%Ix (rounded down to 0x%Ix (%Id%%)), additional free_space: 0x%Ix",
28915 total_ephemeral_plugs,
28917 total_free_space_power2,
28918 (free_space ? (total_free_space_power2 * 100 / free_space) : 0),
28919 additional_space));
28921 size_t saved_all_free_space_indices[MAX_NUM_BUCKETS];
28922 memcpy (saved_all_free_space_indices,
28923 ordered_free_space_indices,
28924 sizeof(saved_all_free_space_indices));
28926 #endif // SEG_REUSE_STATS
28928 if (total_ephemeral_plugs > (free_space + additional_space))
28933 use_bestfit = try_best_fit(FALSE);
28935 if (!use_bestfit && additional_space)
28937 int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (additional_space));
28939 if (relative_free_space_index != -1)
28941 int relative_plug_index = 0;
28942 size_t plugs_to_fit = 0;
28944 for (relative_plug_index = (MAX_NUM_BUCKETS - 1); relative_plug_index >= 0; relative_plug_index--)
28946 plugs_to_fit = ordered_plug_indices[relative_plug_index];
28947 if (plugs_to_fit != 0)
28953 if ((relative_plug_index > relative_free_space_index) ||
28954 ((relative_plug_index == relative_free_space_index) &&
28955 (plugs_to_fit > 1)))
28957 #ifdef SEG_REUSE_STATS
28958 dprintf (SEG_REUSE_LOG_0, ("additional space is 2^%d but we stopped at %d 2^%d plug(s)",
28959 (relative_free_space_index + MIN_INDEX_POWER2),
28961 (relative_plug_index + MIN_INDEX_POWER2)));
28962 #endif // SEG_REUSE_STATS
28966 dprintf (SEG_REUSE_LOG_0, ("Adding end of segment (2^%d)", (relative_free_space_index + MIN_INDEX_POWER2)));
28967 ordered_free_space_indices[relative_free_space_index]++;
28968 use_bestfit = try_best_fit(TRUE);
28971 free_space_items++;
28972 // Since we might've trimmed away some of the free spaces we had, we should see
28973 // if we really need to use end of seg space - if it's the same or smaller than
28974 // the largest space we trimmed we can just add that one back instead of
28975 // using end of seg.
28976 if (relative_free_space_index > trimmed_free_space_index)
28978 *use_additional_space = TRUE;
28982 // If the additional space is no larger than the last trimmed space, we
28983 // should just use that last trimmed space instead.
28984 saved_ordered_free_space_indices[trimmed_free_space_index]++;
28994 dprintf (SEG_REUSE_LOG_0, ("couldn't fit..."));
28996 #ifdef SEG_REUSE_STATS
28997 size_t saved_max = max_free_space_items;
28998 BOOL temp_bestfit = FALSE;
29000 dprintf (SEG_REUSE_LOG_0, ("----Starting experiment process----"));
29001 dprintf (SEG_REUSE_LOG_0, ("----Couldn't fit with max free items %Id", max_free_space_items));
29003 // TODO: need to take the end of segment into consideration.
29004 while (max_free_space_items <= total_free_space_items)
29006 max_free_space_items += max_free_space_items / 2;
29007 dprintf (SEG_REUSE_LOG_0, ("----Temporarily increasing max free spaces to %Id", max_free_space_items));
29008 memcpy (ordered_free_space_indices,
29009 saved_all_free_space_indices,
29010 sizeof(ordered_free_space_indices));
29011 if (try_best_fit(FALSE))
29013 temp_bestfit = TRUE;
29020 dprintf (SEG_REUSE_LOG_0, ("----With %Id max free spaces we could fit", max_free_space_items));
29024 dprintf (SEG_REUSE_LOG_0, ("----Tried all free spaces and still couldn't fit, lost too much space"));
29027 dprintf (SEG_REUSE_LOG_0, ("----Restoring max free spaces to %Id", saved_max));
29028 max_free_space_items = saved_max;
29029 #endif // SEG_REUSE_STATS
29030 if (free_space_items)
29032 max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2);
29033 max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES);
29037 max_free_space_items = MAX_NUM_FREE_SPACES;
29041 dprintf (SEG_REUSE_LOG_0, ("Adjusted number of max free spaces to %Id", max_free_space_items));
29042 dprintf (SEG_REUSE_LOG_0, ("------End of best fitting process------\n"));
29044 return use_bestfit;
29047 BOOL gc_heap::process_free_space (heap_segment* seg,
29049 size_t min_free_size,
29050 size_t min_cont_size,
29051 size_t* total_free_space,
29052 size_t* largest_free_space)
29054 *total_free_space += free_space;
29055 *largest_free_space = max (*largest_free_space, free_space);
29057 #ifdef SIMPLE_DPRINTF
29058 dprintf (SEG_REUSE_LOG_1, ("free space len: %Ix, total free space: %Ix, largest free space: %Ix",
29059 free_space, *total_free_space, *largest_free_space));
29060 #endif //SIMPLE_DPRINTF
29062 if ((*total_free_space >= min_free_size) && (*largest_free_space >= min_cont_size))
29064 #ifdef SIMPLE_DPRINTF
29065 dprintf (SEG_REUSE_LOG_0, ("(gen%d)total free: %Ix(min: %Ix), largest free: %Ix(min: %Ix). Found segment %Ix to reuse without bestfit",
29066 settings.condemned_generation,
29067 *total_free_space, min_free_size, *largest_free_space, min_cont_size,
29070 UNREFERENCED_PARAMETER(seg);
29071 #endif //SIMPLE_DPRINTF
29075 int free_space_index = relative_index_power2_free_space (round_down_power2 (free_space));
29076 if (free_space_index != -1)
29078 ordered_free_space_indices[free_space_index]++;
29083 BOOL gc_heap::expand_reused_seg_p()
29085 BOOL reused_seg = FALSE;
29086 int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand);
29087 if ((heap_expand_mechanism == expand_reuse_bestfit) ||
29088 (heap_expand_mechanism == expand_reuse_normal))
29096 BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size,
29097 allocator* gen_allocator)
29099 min_cont_size += END_SPACE_AFTER_GC;
29100 use_bestfit = FALSE;
29101 commit_end_of_seg = FALSE;
29102 bestfit_first_pin = 0;
29103 uint8_t* first_address = heap_segment_mem (seg);
29104 uint8_t* end_address = heap_segment_reserved (seg);
29105 size_t end_extra_space = end_space_after_gc();
29107 if ((heap_segment_reserved (seg) - end_extra_space) <= heap_segment_plan_allocated (seg))
29109 dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: can't use segment [%Ix %Ix, has less than %d bytes at the end",
29110 first_address, end_address, end_extra_space));
29114 end_address -= end_extra_space;
29116 dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p(gen%d): min free: %Ix, min continuous: %Ix",
29117 settings.condemned_generation, min_free_size, min_cont_size));
29118 size_t eph_gen_starts = eph_gen_starts_size;
29120 if (settings.condemned_generation == max_generation)
29122 size_t free_space = 0;
29123 size_t largest_free_space = free_space;
29124 dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen2: testing segment [%Ix %Ix", first_address, end_address));
29125 //Look through the pinned plugs for relevant ones and find the right pinned plug to start from.
29126 //We are going to allocate the generation starts in the 1st free space,
29127 //so start from the first free space that's big enough for gen starts and a min object size.
29128 // If we see a free space that is >= gen starts but < gen starts + min obj size we just don't use it -
29129 // we could use it by allocating the last generation start a bit bigger but
29130 // the complexity isn't worth the effort (those plugs are from gen2
29131 // already anyway).
29132 reset_pinned_queue_bos();
29134 BOOL has_fit_gen_starts = FALSE;
29136 init_ordered_free_space_indices ();
29137 while (!pinned_plug_que_empty_p())
29140 if ((pinned_plug (m) >= first_address) &&
29141 (pinned_plug (m) < end_address) &&
29142 (pinned_len (m) >= (eph_gen_starts + Align (min_obj_size))))
29148 deque_pinned_plug();
29152 if (!pinned_plug_que_empty_p())
29154 bestfit_first_pin = pinned_plug (m) - pinned_len (m);
29156 if (process_free_space (seg,
29157 pinned_len (m) - eph_gen_starts,
29158 min_free_size, min_cont_size,
29159 &free_space, &largest_free_space))
29164 deque_pinned_plug();
29166 has_fit_gen_starts = TRUE;
29169 dprintf (3, ("first pin is %Ix", pinned_plug (m)));
29171 //tally up free space
29172 while (!pinned_plug_que_empty_p() &&
29173 ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
29175 dprintf (3, ("looking at pin %Ix", pinned_plug (m)));
29176 if (process_free_space (seg,
29178 min_free_size, min_cont_size,
29179 &free_space, &largest_free_space))
29184 deque_pinned_plug();
29188 //try to find space at the end of the segment.
29189 size_t end_space = (end_address - heap_segment_plan_allocated (seg));
29190 size_t additional_space = ((min_free_size > free_space) ? (min_free_size - free_space) : 0);
29191 dprintf (SEG_REUSE_LOG_0, ("end space: %Ix; additional: %Ix", end_space, additional_space));
29192 if (end_space >= additional_space)
29194 BOOL can_fit = TRUE;
29195 commit_end_of_seg = TRUE;
29197 if (largest_free_space < min_cont_size)
29199 if (end_space >= min_cont_size)
29201 additional_space = max (min_cont_size, additional_space);
29202 dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg for eph",
29207 if (settings.concurrent)
29210 commit_end_of_seg = FALSE;
29214 size_t additional_space_bestfit = additional_space;
29215 if (!has_fit_gen_starts)
29217 if (additional_space_bestfit < (eph_gen_starts + Align (min_obj_size)))
29219 dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, gen starts not allocated yet and end space is too small: %Id",
29220 additional_space_bestfit));
29224 bestfit_first_pin = heap_segment_plan_allocated (seg);
29225 additional_space_bestfit -= eph_gen_starts;
29228 can_fit = best_fit (free_space,
29229 largest_free_space,
29230 additional_space_bestfit,
29231 &commit_end_of_seg);
29235 dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse with bestfit, %s committing end of seg",
29236 seg, (commit_end_of_seg ? "with" : "without")));
29240 dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
29247 dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg", seg));
29250 assert (additional_space <= end_space);
29251 if (commit_end_of_seg)
29253 if (!grow_heap_segment (seg, heap_segment_plan_allocated (seg) + additional_space))
29255 dprintf (2, ("Couldn't commit end of segment?!"));
29256 use_bestfit = FALSE;
29263 // We increase the index here because growing the heap segment could create a discrepancy
29264 // with the additional space we used (the committed amount could be bigger).
29265 size_t free_space_end_of_seg =
29266 heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
29267 int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (free_space_end_of_seg));
29268 saved_ordered_free_space_indices[relative_free_space_index]++;
29274 memcpy (ordered_free_space_indices,
29275 saved_ordered_free_space_indices,
29276 sizeof(ordered_free_space_indices));
29277 max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2);
29278 max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items);
29279 dprintf (SEG_REUSE_LOG_0, ("could fit! %Id free spaces, %Id max", free_space_items, max_free_space_items));
29285 dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
29290 assert (settings.condemned_generation == (max_generation-1));
29291 size_t free_space = (end_address - heap_segment_plan_allocated (seg));
29292 size_t largest_free_space = free_space;
29293 dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen1: testing segment [%Ix %Ix", first_address, end_address));
29294 //find the first free list in range of the current segment
29295 size_t sz_list = gen_allocator->first_bucket_size();
29296 unsigned int a_l_idx = 0;
29297 uint8_t* free_list = 0;
29298 for (; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
29300 if ((eph_gen_starts <= sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
29302 free_list = gen_allocator->alloc_list_head_of (a_l_idx);
29305 if ((free_list >= first_address) &&
29306 (free_list < end_address) &&
29307 (unused_array_size (free_list) >= eph_gen_starts))
29313 free_list = free_list_slot (free_list);
29321 init_ordered_free_space_indices ();
29322 if (process_free_space (seg,
29323 unused_array_size (free_list) - eph_gen_starts + Align (min_obj_size),
29324 min_free_size, min_cont_size,
29325 &free_space, &largest_free_space))
29330 free_list = free_list_slot (free_list);
29334 dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, no free list"));
29338 //tally up free space
29344 if ((free_list >= first_address) && (free_list < end_address) &&
29345 process_free_space (seg,
29346 unused_array_size (free_list),
29347 min_free_size, min_cont_size,
29348 &free_space, &largest_free_space))
29353 free_list = free_list_slot (free_list);
29356 if (a_l_idx < gen_allocator->number_of_buckets())
29358 free_list = gen_allocator->alloc_list_head_of (a_l_idx);
29364 dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
29368 BOOL can_fit = best_fit (free_space, 0, NULL);
29371 dprintf (SEG_REUSE_LOG_0, ("(gen1)Found segment %Ix to reuse with bestfit", seg));
29375 dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
29383 void gc_heap::realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
29384 generation* gen, uint8_t* start_address,
29385 unsigned int& active_new_gen_number,
29386 uint8_t*& last_pinned_gap, BOOL& leftp,
29389 , mark* pinned_plug_entry
29390 #endif //SHORT_PLUGS
29393 // detect generation boundaries
29394 // make sure that active_new_gen_number is not the youngest generation,
29395 // because generation_limit wouldn't return the right thing in that case.
29398 if ((active_new_gen_number > 1) &&
29399 (last_plug >= generation_limit (active_new_gen_number)))
29401 assert (last_plug >= start_address);
29402 active_new_gen_number--;
29403 realloc_plan_generation_start (generation_of (active_new_gen_number), gen);
29404 assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
29409 // detect pinned plugs
29410 if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
29412 size_t entry = deque_pinned_plug();
29413 mark* m = pinned_plug_of (entry);
29415 size_t saved_pinned_len = pinned_len(m);
29416 pinned_len(m) = last_plug - last_pinned_gap;
29417 //dprintf (3,("Adjusting pinned gap: [%Ix, %Ix[", (size_t)last_pinned_gap, (size_t)last_plug));
29419 if (m->has_post_plug_info())
29421 last_plug_size += sizeof (gap_reloc_pair);
29422 dprintf (3, ("ra pinned %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size))
29425 last_pinned_gap = last_plug + last_plug_size;
29426 dprintf (3, ("ra found pin %Ix, len: %Ix->%Ix, last_p: %Ix, last_p_size: %Ix",
29427 pinned_plug (m), saved_pinned_len, pinned_len (m), last_plug, last_plug_size));
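// Worked example (hypothetical addresses): if last_pinned_gap is 0x10000 and the
// oldest pin's plug starts at 0x10800, pinned_len (m) becomes 0x800 - the free space
// in front of the pin that can later be handed out through the pinned queue.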
29430 //we are creating a generation fault. set the cards.
29432 size_t end_card = card_of (align_on_card (last_plug + last_plug_size));
29433 size_t card = card_of (last_plug);
29434 while (card != end_card)
29441 else if (last_plug >= start_address)
29443 #ifdef FEATURE_STRUCTALIGN
29444 int requiredAlignment;
29446 node_aligninfo (last_plug, requiredAlignment, pad);
29448 // from how we previously aligned the plug's destination address,
29449 // compute the actual alignment offset.
29450 uint8_t* reloc_plug = last_plug + node_relocation_distance (last_plug);
29451 ptrdiff_t alignmentOffset = ComputeStructAlignPad(reloc_plug, requiredAlignment, 0);
29452 if (!alignmentOffset)
29454 // allocate_in_expanded_heap doesn't expect alignmentOffset to be zero.
29455 alignmentOffset = requiredAlignment;
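// Illustrative example (hypothetical numbers): with requiredAlignment = 8 and a
// relocated plug address that is already 8-aligned, ComputeStructAlignPad returns 0,
// so the code above substitutes requiredAlignment (8) - allocate_in_expanded_heap
// treats a zero offset as invalid per the comment above.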
29458 //clear the alignment info because we are reallocating
29459 clear_node_aligninfo (last_plug);
29460 #else // FEATURE_STRUCTALIGN
29461 //clear the realignment flag because we are reallocating
29462 clear_node_realigned (last_plug);
29463 #endif // FEATURE_STRUCTALIGN
29464 BOOL adjacentp = FALSE;
29465 BOOL set_padding_on_saved_p = FALSE;
29469 last_plug_size += sizeof (gap_reloc_pair);
29472 assert (pinned_plug_entry != NULL);
29473 if (last_plug_size <= sizeof (plug_and_gap))
29475 set_padding_on_saved_p = TRUE;
29477 #endif //SHORT_PLUGS
29479 dprintf (3, ("ra plug %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size))
29483 clear_padding_in_expand (last_plug, set_padding_on_saved_p, pinned_plug_entry);
29484 #endif //SHORT_PLUGS
29486 uint8_t* new_address = allocate_in_expanded_heap(gen, last_plug_size, adjacentp, last_plug,
29488 set_padding_on_saved_p,
29490 #endif //SHORT_PLUGS
29491 TRUE, active_new_gen_number REQD_ALIGN_AND_OFFSET_ARG);
29493 dprintf (3, ("ra NA: [%Ix, %Ix[: %Ix", new_address, (new_address + last_plug_size), last_plug_size));
29494 assert (new_address);
29495 set_node_relocation_distance (last_plug, new_address - last_plug);
29496 #ifdef FEATURE_STRUCTALIGN
29497 if (leftp && node_alignpad (last_plug) == 0)
29498 #else // FEATURE_STRUCTALIGN
29499 if (leftp && !node_realigned (last_plug))
29500 #endif // FEATURE_STRUCTALIGN
29502 // TODO - temporarily disabled the L optimization because of a bug in it.
29503 //set_node_left (last_plug);
29505 dprintf (3,(" Re-allocating %Ix->%Ix len %Id", (size_t)last_plug, (size_t)new_address, last_plug_size));
29510 void gc_heap::realloc_in_brick (uint8_t* tree, uint8_t*& last_plug,
29511 uint8_t* start_address,
29513 unsigned int& active_new_gen_number,
29514 uint8_t*& last_pinned_gap, BOOL& leftp)
29516 assert (tree != NULL);
29517 int left_node = node_left_child (tree);
29518 int right_node = node_right_child (tree);
29520 dprintf (3, ("ra: tree: %Ix, last_pin_gap: %Ix, last_p: %Ix, L: %d, R: %d",
29521 tree, last_pinned_gap, last_plug, left_node, right_node));
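// A minimal sketch (illustrative only, not part of the GC) of the traversal shape
// used below: an in-order walk where child links are byte offsets relative to the
// node itself. The real code additionally threads last_plug / last_pinned_gap
// through the recursion so each node can finish sizing the previous plug.
#if 0
static void walk_plug_tree (uint8_t* tree)
{
    if (node_left_child (tree) != 0)
        walk_plug_tree (tree + node_left_child (tree));   // lower addresses first
    // ... process the plug at 'tree' (realloc_plug in the real code) ...
    if (node_right_child (tree) != 0)
        walk_plug_tree (tree + node_right_child (tree));  // then higher addresses
}
#endif // illustrative sketch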
29525 dprintf (3, ("LN: realloc %Ix(%Ix)", (tree + left_node), last_plug));
29526 realloc_in_brick ((tree + left_node), last_plug, start_address,
29527 gen, active_new_gen_number, last_pinned_gap,
29531 if (last_plug != 0)
29533 uint8_t* plug = tree;
29535 BOOL has_pre_plug_info_p = FALSE;
29536 BOOL has_post_plug_info_p = FALSE;
29537 mark* pinned_plug_entry = get_next_pinned_entry (tree,
29538 &has_pre_plug_info_p,
29539 &has_post_plug_info_p,
29542 // We only care about the pre plug info because that's what decides if the last plug is shortened.
29543 // The pinned plugs are handled in realloc_plug.
29544 size_t gap_size = node_gap_size (plug);
29545 uint8_t* gap = (plug - gap_size);
29546 uint8_t* last_plug_end = gap;
29547 size_t last_plug_size = (last_plug_end - last_plug);
29548 // Cannot assert this - a plug could be less than that due to the shortened ones.
29549 //assert (last_plug_size >= Align (min_obj_size));
29550 dprintf (3, ("ra: plug %Ix, gap size: %Ix, last_pin_gap: %Ix, last_p: %Ix, last_p_end: %Ix, shortened: %d",
29551 plug, gap_size, last_pinned_gap, last_plug, last_plug_end, (has_pre_plug_info_p ? 1 : 0)));
29552 realloc_plug (last_plug_size, last_plug, gen, start_address,
29553 active_new_gen_number, last_pinned_gap,
29554 leftp, has_pre_plug_info_p
29556 , pinned_plug_entry
29557 #endif //SHORT_PLUGS
29565 dprintf (3, ("RN: realloc %Ix(%Ix)", (tree + right_node), last_plug));
29566 realloc_in_brick ((tree + right_node), last_plug, start_address,
29567 gen, active_new_gen_number, last_pinned_gap,
29573 gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg,
29574 uint8_t* start_address, uint8_t* end_address,
29575 unsigned active_new_gen_number)
29577 dprintf (3, ("--- Reallocing ---"));
29581 //make sure that every generation has a planned allocation start
29582 int gen_number = max_generation - 1;
29583 while (gen_number >= 0)
29585 generation* gen = generation_of (gen_number);
29586 if (0 == generation_plan_allocation_start (gen))
29588 generation_plan_allocation_start (gen) =
29589 bestfit_first_pin + (max_generation - gen_number - 1) * Align (min_obj_size);
29590 generation_plan_allocation_start_size (gen) = Align (min_obj_size);
29591 assert (generation_plan_allocation_start (gen));
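// Worked example: with max_generation = 2, a gen1 lacking a planned start gets
// bestfit_first_pin itself ((2-1-1) * Align (min_obj_size) = 0) and gen0 gets
// bestfit_first_pin + Align (min_obj_size), so the backfilled ephemeral generation
// starts are stacked one min-object apart starting at bestfit_first_pin.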
29597 uint8_t* first_address = start_address;
29598 //Look for the right pinned plug to start from.
29599 reset_pinned_queue_bos();
29600 uint8_t* planned_ephemeral_seg_end = heap_segment_plan_allocated (seg);
29601 while (!pinned_plug_que_empty_p())
29603 mark* m = oldest_pin();
29604 if ((pinned_plug (m) >= planned_ephemeral_seg_end) && (pinned_plug (m) < end_address))
29606 if (pinned_plug (m) < first_address)
29608 first_address = pinned_plug (m);
29613 deque_pinned_plug();
29616 size_t current_brick = brick_of (first_address);
29617 size_t end_brick = brick_of (end_address-1);
29618 uint8_t* last_plug = 0;
29620 uint8_t* last_pinned_gap = heap_segment_plan_allocated (seg);
29621 BOOL leftp = FALSE;
29623 dprintf (3, ("start addr: %Ix, first addr: %Ix, current oldest pin: %Ix",
29624 start_address, first_address, pinned_plug (oldest_pin())));
29626 while (current_brick <= end_brick)
29628 int brick_entry = brick_table [ current_brick ];
29629 if (brick_entry >= 0)
29631 realloc_in_brick ((brick_address (current_brick) + brick_entry - 1),
29632 last_plug, start_address, consing_gen,
29633 active_new_gen_number, last_pinned_gap,
29639 if (last_plug != 0)
29641 realloc_plug (end_address - last_plug, last_plug, consing_gen,
29643 active_new_gen_number, last_pinned_gap,
29647 #endif //SHORT_PLUGS
29651 //Fix the old segment allocated size
29652 assert (last_pinned_gap >= heap_segment_mem (seg));
29653 assert (last_pinned_gap <= heap_segment_committed (seg));
29654 heap_segment_plan_allocated (seg) = last_pinned_gap;
29657 void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end)
29660 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
29662 BOOL contains_pinned_plugs = FALSE;
29665 while (mi != mark_stack_tos)
29667 m = pinned_plug_of (mi);
29668 if ((pinned_plug (m) >= start) && (pinned_plug (m) < end))
29670 contains_pinned_plugs = TRUE;
29677 if (contains_pinned_plugs)
29682 #endif //VERIFY_HEAP
29685 void gc_heap::set_expand_in_full_gc (int condemned_gen_number)
29687 if (!should_expand_in_full_gc)
29689 if ((condemned_gen_number != max_generation) &&
29690 (settings.pause_mode != pause_low_latency) &&
29691 (settings.pause_mode != pause_sustained_low_latency))
29693 should_expand_in_full_gc = TRUE;
29698 void gc_heap::save_ephemeral_generation_starts()
29700 for (int ephemeral_generation = 0; ephemeral_generation < max_generation; ephemeral_generation++)
29702 saved_ephemeral_plan_start[ephemeral_generation] =
29703 generation_plan_allocation_start (generation_of (ephemeral_generation));
29704 saved_ephemeral_plan_start_size[ephemeral_generation] =
29705 generation_plan_allocation_start_size (generation_of (ephemeral_generation));
29709 generation* gc_heap::expand_heap (int condemned_generation,
29710 generation* consing_gen,
29711 heap_segment* new_heap_segment)
29713 UNREFERENCED_PARAMETER(condemned_generation);
29714 assert (condemned_generation >= (max_generation -1));
29715 unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
29716 uint8_t* start_address = generation_limit (max_generation);
29717 uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment);
29718 BOOL should_promote_ephemeral = FALSE;
29719 ptrdiff_t eph_size = total_ephemeral_size;
29720 #ifdef BACKGROUND_GC
29721 dprintf(2,("%s: ---- Heap Expansion ----", (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")));
29722 #endif //BACKGROUND_GC
29723 settings.heap_expansion = TRUE;
29725 #ifdef BACKGROUND_GC
29726 if (cm_in_progress)
29728 if (!expanded_in_fgc)
29730 expanded_in_fgc = TRUE;
29733 #endif //BACKGROUND_GC
29735 //reset the elevation state for next time.
29736 dprintf (2, ("Elevation: elevation = el_none"));
29737 if (settings.should_lock_elevation && !expand_reused_seg_p())
29738 settings.should_lock_elevation = FALSE;
29740 heap_segment* new_seg = new_heap_segment;
29743 return consing_gen;
29745 //copy the card and brick tables
29746 if (g_gc_card_table!= card_table)
29747 copy_brick_card_table();
29749 BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
29750 dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p));
29752 assert (generation_plan_allocation_start (generation_of (max_generation-1)));
29753 assert (generation_plan_allocation_start (generation_of (max_generation-1)) >=
29754 heap_segment_mem (ephemeral_heap_segment));
29755 assert (generation_plan_allocation_start (generation_of (max_generation-1)) <=
29756 heap_segment_committed (ephemeral_heap_segment));
29758 assert (generation_plan_allocation_start (youngest_generation));
29759 assert (generation_plan_allocation_start (youngest_generation) <
29760 heap_segment_plan_allocated (ephemeral_heap_segment));
29762 if (settings.pause_mode == pause_no_gc)
29764 // We don't reuse for no gc, so the size used on the new eph seg is eph_size.
29765 if ((size_t)(heap_segment_reserved (new_seg) - heap_segment_mem (new_seg)) < (eph_size + soh_allocation_no_gc))
29766 should_promote_ephemeral = TRUE;
29772 should_promote_ephemeral = dt_low_ephemeral_space_p (tuning_deciding_promote_ephemeral);
29776 if (should_promote_ephemeral)
29778 ephemeral_promotion = TRUE;
29779 get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep);
29780 dprintf (2, ("promoting ephemeral"));
29781 save_ephemeral_generation_starts();
29785 // commit the new ephemeral segment all at once if it is a new one.
29786 if ((eph_size > 0) && new_segment_p)
29788 #ifdef FEATURE_STRUCTALIGN
29789 // The destination may require a larger alignment padding than the source.
29790 // Assume the worst possible alignment padding.
29791 eph_size += ComputeStructAlignPad(heap_segment_mem (new_seg), MAX_STRUCTALIGN, OBJECT_ALIGNMENT_OFFSET);
29792 #endif // FEATURE_STRUCTALIGN
29793 #ifdef RESPECT_LARGE_ALIGNMENT
29794 //Since the generation start can be larger than min_obj_size,
29795 //the alignment could be switched.
29796 eph_size += switch_alignment_size(FALSE);
29797 #endif //RESPECT_LARGE_ALIGNMENT
29798 //Since the generation start can be larger than min_obj_size,
29799 //compare the alignment of the first object in gen1.
29800 if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0)
29802 fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE);
29803 return consing_gen;
29805 heap_segment_used (new_seg) = heap_segment_committed (new_seg);
29808 //Fix the end of the old ephemeral heap segment
29809 heap_segment_plan_allocated (ephemeral_heap_segment) =
29810 generation_plan_allocation_start (generation_of (max_generation-1));
29812 dprintf (3, ("Old ephemeral allocated set to %Ix",
29813 (size_t)heap_segment_plan_allocated (ephemeral_heap_segment)));
29818 // TODO - Is this really necessary? We should think about it.
29819 //initialize the first brick
29820 size_t first_brick = brick_of (heap_segment_mem (new_seg));
29821 set_brick (first_brick,
29822 heap_segment_mem (new_seg) - brick_address (first_brick));
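// Illustrative note: a brick entry records where the first plug sits relative to the
// brick start (stored with a +1 bias - note the "brick_entry - 1" when realloc_plugs
// reads it). E.g., if heap_segment_mem (new_seg) were 0x40 bytes past
// brick_address (first_brick), the entry would encode that 0x40 offset.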
29825 //From this point on, we cannot run out of memory
29827 //reset the allocation of the consing generation back to the end of the
29828 //old ephemeral segment
29829 generation_allocation_limit (consing_gen) =
29830 heap_segment_plan_allocated (ephemeral_heap_segment);
29831 generation_allocation_pointer (consing_gen) = generation_allocation_limit (consing_gen);
29832 generation_allocation_segment (consing_gen) = ephemeral_heap_segment;
29834 //clear the generation gap for all of the ephemeral generations
29836 int generation_num = max_generation-1;
29837 while (generation_num >= 0)
29839 generation* gen = generation_of (generation_num);
29840 generation_plan_allocation_start (gen) = 0;
29845 heap_segment* old_seg = ephemeral_heap_segment;
29846 ephemeral_heap_segment = new_seg;
29848 //Note: the ephemeral segment shouldn't be threaded onto the segment chain
29849 //because the relocation and compact phases shouldn't see it
29851 // set the generation members used by allocate_in_expanded_heap
29852 // and switch to ephemeral generation
29853 consing_gen = ensure_ephemeral_heap_segment (consing_gen);
29855 if (!should_promote_ephemeral)
29857 realloc_plugs (consing_gen, old_seg, start_address, end_address,
29858 active_new_gen_number);
29863 repair_allocation_in_expanded_heap (consing_gen);
29866 // assert that the generation gaps for all of the ephemeral generations were allocated.
29869 int generation_num = max_generation-1;
29870 while (generation_num >= 0)
29872 generation* gen = generation_of (generation_num);
29873 assert (generation_plan_allocation_start (gen));
29879 if (!new_segment_p)
29881 dprintf (2, ("Demoting ephemeral segment"));
29882 //demote the entire segment.
29883 settings.demotion = TRUE;
29884 get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
29885 demotion_low = heap_segment_mem (ephemeral_heap_segment);
29886 demotion_high = heap_segment_reserved (ephemeral_heap_segment);
29890 demotion_low = MAX_PTR;
29892 #ifndef MULTIPLE_HEAPS
29893 settings.demotion = FALSE;
29894 get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
29895 #endif //!MULTIPLE_HEAPS
29897 ptrdiff_t eph_size1 = total_ephemeral_size;
29898 MAYBE_UNUSED_VAR(eph_size1);
29900 if (!should_promote_ephemeral && new_segment_p)
29902 assert (eph_size1 <= eph_size);
29905 if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg))
29907 // This is to catch when we accidentally delete a segment that has pins.
29908 verify_no_pins (heap_segment_mem (old_seg), heap_segment_reserved (old_seg));
29911 verify_no_pins (heap_segment_plan_allocated (old_seg), heap_segment_reserved(old_seg));
29913 dprintf(2,("---- End of Heap Expansion ----"));
29914 return consing_gen;
29917 void gc_heap::set_static_data()
29919 static_data* pause_mode_sdata = static_data_table[latency_level];
29920 for (int i = 0; i < NUMBERGENERATIONS; i++)
29922 dynamic_data* dd = dynamic_data_of (i);
29923 static_data* sdata = &pause_mode_sdata[i];
29926 dd->min_size = sdata->min_size;
29928 dprintf (GTC_LOG, ("PM: %d, gen%d: min: %Id, max: %Id, fr_l: %Id, fr_b: %d%%",
29929 settings.pause_mode,i,
29930 dd->min_size, dd_max_size (dd),
29931 sdata->fragmentation_limit, (int)(sdata->fragmentation_burden_limit * 100)));
29935 // Initialize the values that are not const.
29936 void gc_heap::init_static_data()
29938 size_t gen0_min_size = get_gen0_min_size();
29940 size_t gen0_max_size =
29941 #ifdef MULTIPLE_HEAPS
29942 max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024));
29943 #else //MULTIPLE_HEAPS
29944 (gc_can_use_concurrent ?
29946 max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024)));
29947 #endif //MULTIPLE_HEAPS
29949 if (heap_hard_limit)
29951 size_t gen0_max_size_seg = soh_segment_size / 4;
29952 dprintf (GTC_LOG, ("limit gen0 max %Id->%Id", gen0_max_size, gen0_max_size_seg));
29953 gen0_max_size = min (gen0_max_size, gen0_max_size_seg);
29956 size_t gen0_max_size_config = (size_t)GCConfig::GetGCGen0MaxBudget();
29958 if (gen0_max_size_config)
29960 gen0_max_size = min (gen0_max_size, gen0_max_size_config);
29963 gen0_max_size = Align (gen0_max_size);
29965 gen0_min_size = min (gen0_min_size, gen0_max_size);
29967 // TODO: gen0_max_size has a 200MB cap; gen1_max_size should also have a cap.
29968 size_t gen1_max_size = (size_t)
29969 #ifdef MULTIPLE_HEAPS
29970 max (6*1024*1024, Align(soh_segment_size/2));
29971 #else //MULTIPLE_HEAPS
29972 (gc_can_use_concurrent ?
29974 max (6*1024*1024, Align(soh_segment_size/2)));
29975 #endif //MULTIPLE_HEAPS
29977 dprintf (GTC_LOG, ("gen0 min: %Id, max: %Id, gen1 max: %Id",
29978 gen0_min_size, gen0_max_size, gen1_max_size));
29980 for (int i = latency_level_first; i <= latency_level_last; i++)
29982 static_data_table[i][0].min_size = gen0_min_size;
29983 static_data_table[i][0].max_size = gen0_max_size;
29984 static_data_table[i][1].max_size = gen1_max_size;
29988 bool gc_heap::init_dynamic_data()
29990 qpf = GCToOSInterface::QueryPerformanceFrequency();
29992 uint32_t now = (uint32_t)GetHighPrecisionTimeStamp();
29996 for (int i = 0; i <= max_generation+1; i++)
29998 dynamic_data* dd = dynamic_data_of (i);
30000 dd->time_clock = now;
30001 dd->current_size = 0;
30002 dd->promoted_size = 0;
30003 dd->collection_count = 0;
30004 dd->new_allocation = dd->min_size;
30005 dd->gc_new_allocation = dd->new_allocation;
30006 dd->desired_allocation = dd->new_allocation;
30007 dd->fragmentation = 0;
30010 #ifdef GC_CONFIG_DRIVEN
30011 if (heap_number == 0)
30013 #endif //GC_CONFIG_DRIVEN
30018 float gc_heap::surv_to_growth (float cst, float limit, float max_limit)
30020 if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f))))
30021 return ((limit - limit*cst) / (1.0f - (cst * limit)));
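// Worked example (hypothetical limits): with limit = 9.0 and max_limit = 20.0 the
// branch above applies while cst < (20-9)/(9*19) ~= 0.064; at cst = 0.05 the growth
// factor is (9 - 9*0.05)/(1 - 0.05*9) = 8.55/0.55 ~= 15.5. Larger survival ratios
// presumably fall through to the max_limit cap.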
30027 //if the allocation budget wasn't exhausted, the new budget may be wrong because the survival
30028 //may not be correct (the collection happened too soon). Correct with a linear estimate based
30029 //on the previous value of the budget.
30030 static size_t linear_allocation_model (float allocation_fraction, size_t new_allocation,
30031 size_t previous_desired_allocation, size_t collection_count)
30033 if ((allocation_fraction < 0.95) && (allocation_fraction > 0.0))
30035 dprintf (2, ("allocation fraction: %d", (int)(allocation_fraction/100.0)));
30036 new_allocation = (size_t)(allocation_fraction*new_allocation + (1.0-allocation_fraction)*previous_desired_allocation);
30039 size_t smoothing = 3; // exponential smoothing factor
30040 if (smoothing > collection_count)
30041 smoothing = collection_count;
30042 new_allocation = new_allocation / smoothing + ((previous_desired_allocation / smoothing) * (smoothing-1));
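// Worked example (illustrative): with smoothing = 3, a previous budget of 90MB and
// a raw new budget of 30MB, the blended budget is 30/3 + (90/3)*2 = 70MB, i.e. the
// budget moves a third of the way toward the new value per GC once at least three
// collections have happened (earlier, smoothing is clamped to collection_count).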
30044 UNREFERENCED_PARAMETER(collection_count);
30046 return new_allocation;
30049 size_t gc_heap::desired_new_allocation (dynamic_data* dd,
30050 size_t out, int gen_number,
30053 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30055 if (dd_begin_data_size (dd) == 0)
30057 size_t new_allocation = dd_min_size (dd);
30058 current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation;
30059 return new_allocation;
30064 size_t previous_desired_allocation = dd_desired_allocation (dd);
30065 size_t current_size = dd_current_size (dd);
30066 float max_limit = dd_max_limit (dd);
30067 float limit = dd_limit (dd);
30068 size_t min_gc_size = dd_min_size (dd);
30070 size_t max_size = dd_max_size (dd);
30071 size_t new_allocation = 0;
30072 float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd));
30073 if (gen_number >= max_generation)
30075 size_t new_size = 0;
30077 cst = min (1.0f, float (out) / float (dd_begin_data_size (dd)));
30079 f = surv_to_growth (cst, limit, max_limit);
30080 size_t max_growth_size = (size_t)(max_size / f);
30081 if (current_size >= max_growth_size)
30083 new_size = max_size;
30087 new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size);
30090 assert ((new_size >= current_size) || (new_size == max_size));
30092 if (gen_number == max_generation)
30094 new_allocation = max((new_size - current_size), min_gc_size);
30096 new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
30097 dd_desired_allocation (dd), dd_collection_count (dd));
30099 if ((dd_fragmentation (dd) > ((size_t)((f-1)*current_size))))
30101 //reducing allocation in case of fragmentation
30102 size_t new_allocation1 = max (min_gc_size,
30104 (size_t)((float)new_allocation * current_size /
30105 ((float)current_size + 2*dd_fragmentation (dd))));
30106 dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id",
30107 new_allocation, new_allocation1));
30108 new_allocation = new_allocation1;
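// Worked example (illustrative): with current_size = 100MB, 50MB of fragmentation
// and a tentative budget of 30MB, the reduced budget is 30 * 100/(100 + 2*50) = 15MB
// (floored at min_gc_size) - heavy gen2 fragmentation halves the budget here.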
30111 else //large object heap
30113 uint32_t memory_load = 0;
30114 uint64_t available_physical = 0;
30115 get_memory_info (&memory_load, &available_physical);
30117 if (heap_hard_limit)
30119 size_t loh_allocated = 0;
30120 size_t loh_committed = committed_size (true, &loh_allocated);
30121 dprintf (1, ("GC#%Id h%d, GMI: LOH budget, LOH commit %Id (obj %Id, frag %Id), total commit: %Id (recorded: %Id)",
30122 (size_t)settings.gc_index, heap_number,
30123 loh_committed, loh_allocated,
30124 dd_fragmentation (dynamic_data_of (max_generation + 1)),
30125 get_total_committed_size(), (current_total_committed - current_total_committed_bookkeeping)));
30128 if (heap_number == 0)
30129 settings.exit_memory_load = memory_load;
30130 if (available_physical > 1024*1024)
30131 available_physical -= 1024*1024;
30133 uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number));
30134 if (available_free > (uint64_t)MAX_PTR)
30136 available_free = (uint64_t)MAX_PTR;
30139 //try to avoid OOM during large object allocation
30140 new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))),
30141 (size_t)available_free),
30142 max ((current_size/4), min_gc_size));
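// Illustrative reading of the clamp above: the LOH budget is the projected growth
// (at least gen2's desired allocation), capped by available physical memory plus the
// LOH free list, and floored at max (current_size/4, min_gc_size). E.g., a 200MB
// growth estimate with only 80MB of available_free is trimmed to 80MB.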
30144 new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
30145 dd_desired_allocation (dd), dd_collection_count (dd));
30151 size_t survivors = out;
30152 cst = float (survivors) / float (dd_begin_data_size (dd));
30153 f = surv_to_growth (cst, limit, max_limit);
30154 new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size);
30156 new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
30157 dd_desired_allocation (dd), dd_collection_count (dd));
30159 if (gen_number == 0)
30164 //printf ("%f, %Id\n", cst, new_allocation);
30165 size_t free_space = generation_free_list_space (generation_of (gen_number));
30166 // DTREVIEW - is min_gc_size really a good choice?
30167 // on 64-bit this will almost always be true.
30168 dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size));
30169 if (free_space > min_gc_size)
30171 settings.gen0_reduction_count = 2;
30175 if (settings.gen0_reduction_count > 0)
30176 settings.gen0_reduction_count--;
30179 if (settings.gen0_reduction_count > 0)
30181 dprintf (2, ("Reducing new allocation based on fragmentation"));
30182 new_allocation = min (new_allocation,
30183 max (min_gc_size, (max_size/3)));
30188 size_t new_allocation_ret =
30189 Align (new_allocation, get_alignment_constant (!(gen_number == (max_generation+1))));
30190 int gen_data_index = gen_number;
30191 gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]);
30192 gen_data->new_allocation = new_allocation_ret;
30194 dd_surv (dd) = cst;
30196 #ifdef SIMPLE_DPRINTF
30197 dprintf (1, ("h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id",
30198 heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)),
30199 (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
30201 dprintf (1,("gen: %d in: %Id out: %Id ", gen_number, generation_allocation_size (generation_of (gen_number)), out));
30202 dprintf (1,("current: %Id alloc: %Id ", current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd))));
30203 dprintf (1,(" surv: %d%% f: %d%% new-size: %Id new-alloc: %Id",
30204 (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
30205 #endif //SIMPLE_DPRINTF
30207 return new_allocation_ret;
30211 //returns the planned size of a generation (including free list element)
30212 size_t gc_heap::generation_plan_size (int gen_number)
30214 if (0 == gen_number)
30215 return max((heap_segment_plan_allocated (ephemeral_heap_segment) -
30216 generation_plan_allocation_start (generation_of (gen_number))),
30217 (int)Align (min_obj_size));
30220 generation* gen = generation_of (gen_number);
30221 if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
30222 return (generation_plan_allocation_start (generation_of (gen_number - 1)) -
30223 generation_plan_allocation_start (generation_of (gen_number)));
30226 size_t gensize = 0;
30227 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30229 PREFIX_ASSUME(seg != NULL);
30231 while (seg && (seg != ephemeral_heap_segment))
30233 gensize += heap_segment_plan_allocated (seg) -
30234 heap_segment_mem (seg);
30235 seg = heap_segment_next_rw (seg);
30239 gensize += (generation_plan_allocation_start (generation_of (gen_number - 1)) -
30240 heap_segment_mem (ephemeral_heap_segment));
30248 //returns the size of a generation (including free list element)
30249 size_t gc_heap::generation_size (int gen_number)
30251 if (0 == gen_number)
30252 return max((heap_segment_allocated (ephemeral_heap_segment) -
30253 generation_allocation_start (generation_of (gen_number))),
30254 (int)Align (min_obj_size));
30257 generation* gen = generation_of (gen_number);
30258 if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
30259 return (generation_allocation_start (generation_of (gen_number - 1)) -
30260 generation_allocation_start (generation_of (gen_number)));
30263 size_t gensize = 0;
30264 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30266 PREFIX_ASSUME(seg != NULL);
30268 while (seg && (seg != ephemeral_heap_segment))
30270 gensize += heap_segment_allocated (seg) -
30271 heap_segment_mem (seg);
30272 seg = heap_segment_next_rw (seg);
30276 gensize += (generation_allocation_start (generation_of (gen_number - 1)) -
30277 heap_segment_mem (ephemeral_heap_segment));
30286 size_t gc_heap::compute_in (int gen_number)
30288 assert (gen_number != 0);
30289 dynamic_data* dd = dynamic_data_of (gen_number);
30291 size_t in = generation_allocation_size (generation_of (gen_number));
30293 if (gen_number == max_generation && ephemeral_promotion)
30296 for (int i = 0; i <= max_generation; i++)
30298 dynamic_data* dd = dynamic_data_of (i);
30299 in += dd_survived_size (dd);
30300 if (i != max_generation)
30302 generation_condemned_allocated (generation_of (gen_number)) += dd_survived_size (dd);
30307 dd_gc_new_allocation (dd) -= in;
30308 dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30310 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30311 gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
30314 generation_allocation_size (generation_of (gen_number)) = 0;
30318 void gc_heap::compute_promoted_allocation (int gen_number)
30320 compute_in (gen_number);
30325 size_t gc_heap::trim_youngest_desired (uint32_t memory_load,
30326 size_t total_new_allocation,
30327 size_t total_min_allocation)
30329 if (memory_load < MAX_ALLOWED_MEM_LOAD)
30331 // If the total of memory load and gen0 budget exceeds
30332 // our max memory load limit, trim the gen0 budget so the total
30333 // is the max memory load limit.
30334 size_t remain_memory_load = (MAX_ALLOWED_MEM_LOAD - memory_load) * mem_one_percent;
30335 return min (total_new_allocation, remain_memory_load);
30339 return max (mem_one_percent, total_min_allocation);
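// Worked example (hypothetical numbers): if MAX_ALLOWED_MEM_LOAD were 90 and the
// current memory load is 85%, the combined gen0 budget is trimmed to at most
// 5 * mem_one_percent (5% of physical memory); at or above the limit we fall back
// to max (mem_one_percent, total_min_allocation) as returned above.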
30343 size_t gc_heap::joined_youngest_desired (size_t new_allocation)
30345 dprintf (2, ("Entry memory load: %d; gen0 new_alloc: %Id", settings.entry_memory_load, new_allocation));
30347 size_t final_new_allocation = new_allocation;
30348 if (new_allocation > MIN_YOUNGEST_GEN_DESIRED)
30350 uint32_t num_heaps = 1;
30352 #ifdef MULTIPLE_HEAPS
30353 num_heaps = gc_heap::n_heaps;
30354 #endif //MULTIPLE_HEAPS
30356 size_t total_new_allocation = new_allocation * num_heaps;
30357 size_t total_min_allocation = MIN_YOUNGEST_GEN_DESIRED * num_heaps;
30359 if ((settings.entry_memory_load >= MAX_ALLOWED_MEM_LOAD) ||
30360 (total_new_allocation > max (youngest_gen_desired_th, total_min_allocation)))
30362 uint32_t memory_load = 0;
30363 get_memory_info (&memory_load);
30364 settings.exit_memory_load = memory_load;
30365 dprintf (2, ("Current emory load: %d", memory_load));
30367 size_t final_total =
30368 trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation);
30369 size_t max_new_allocation =
30370 #ifdef MULTIPLE_HEAPS
30371 dd_max_size (g_heaps[0]->dynamic_data_of (0));
30372 #else //MULTIPLE_HEAPS
30373 dd_max_size (dynamic_data_of (0));
30374 #endif //MULTIPLE_HEAPS
30376 final_new_allocation = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation);
30380 if (final_new_allocation < new_allocation)
30382 settings.gen0_reduction_count = 2;
30385 return final_new_allocation;
30390 gc_history_per_heap* gc_heap::get_gc_data_per_heap()
30392 #ifdef BACKGROUND_GC
30393 return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap);
30395 return &gc_data_per_heap;
30396 #endif //BACKGROUND_GC
30399 void gc_heap::compute_new_dynamic_data (int gen_number)
30401 PREFIX_ASSUME(gen_number >= 0);
30402 PREFIX_ASSUME(gen_number <= max_generation);
30404 dynamic_data* dd = dynamic_data_of (gen_number);
30405 generation* gen = generation_of (gen_number);
30406 size_t in = (gen_number==0) ? 0 : compute_in (gen_number);
30408 size_t total_gen_size = generation_size (gen_number);
30409 //keep track of fragmentation
30410 dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);
30411 dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
30413 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30415 size_t out = dd_survived_size (dd);
30417 gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
30418 gen_data->size_after = total_gen_size;
30419 gen_data->free_list_space_after = generation_free_list_space (gen);
30420 gen_data->free_obj_space_after = generation_free_obj_space (gen);
30422 if ((settings.pause_mode == pause_low_latency) && (gen_number <= 1))
30424 // When we are in the low latency mode, we can still be
30425 // condemning more than gen1 because of induced GCs.
30426 dd_desired_allocation (dd) = low_latency_alloc;
30430 if (gen_number == 0)
30432 //compensate for the promotion of dead finalizable objects.
30433 //they shouldn't be counted for growth.
30434 size_t final_promoted = 0;
30435 final_promoted = min (promoted_bytes (heap_number), out);
30436 // Prefast: this is clear from above but prefast needs to be told explicitly
30437 PREFIX_ASSUME(final_promoted <= out);
30439 dprintf (2, ("gen: %d final promoted: %Id", gen_number, final_promoted));
30440 dd_freach_previous_promotion (dd) = final_promoted;
30441 size_t lower_bound = desired_new_allocation (dd, out-final_promoted, gen_number, 0);
30443 if (settings.condemned_generation == 0)
30445 //there is no noise.
30446 dd_desired_allocation (dd) = lower_bound;
30450 size_t higher_bound = desired_new_allocation (dd, out, gen_number, 1);
30452 // <TODO>This assert was causing AppDomains\unload\test1n\test1nrun.bat to fail</TODO>
30453 //assert ( lower_bound <= higher_bound);
30455 //discount the noise. Change the desired allocation
30456 //only if the previous value is outside of the range.
30457 if (dd_desired_allocation (dd) < lower_bound)
30459 dd_desired_allocation (dd) = lower_bound;
30461 else if (dd_desired_allocation (dd) > higher_bound)
30463 dd_desired_allocation (dd) = higher_bound;
30465 #if defined (BIT64) && !defined (MULTIPLE_HEAPS)
30466 dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd));
30467 #endif // BIT64 && !MULTIPLE_HEAPS
30468 trim_youngest_desired_low_memory();
30469 dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd)));
30474 dd_desired_allocation (dd) = desired_new_allocation (dd, out, gen_number, 0);
30478 gen_data->pinned_surv = dd_pinned_survived_size (dd);
30479 gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd);
30481 dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
30482 dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30485 dd_promoted_size (dd) = out;
30486 if (gen_number == max_generation)
30488 dd = dynamic_data_of (max_generation+1);
30489 total_gen_size = generation_size (max_generation + 1);
30490 dd_fragmentation (dd) = generation_free_list_space (large_object_generation) +
30491 generation_free_obj_space (large_object_generation);
30492 dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
30493 dd_survived_size (dd) = dd_current_size (dd);
30495 out = dd_current_size (dd);
30496 dd_desired_allocation (dd) = desired_new_allocation (dd, out, max_generation+1, 0);
30497 dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd),
30498 get_alignment_constant (FALSE));
30499 dd_new_allocation (dd) = dd_gc_new_allocation (dd);
30501 gen_data = &(current_gc_data_per_heap->gen_data[max_generation+1]);
30502 gen_data->size_after = total_gen_size;
30503 gen_data->free_list_space_after = generation_free_list_space (large_object_generation);
30504 gen_data->free_obj_space_after = generation_free_obj_space (large_object_generation);
30505 gen_data->npinned_surv = out;
30506 #ifdef BACKGROUND_GC
30507 end_loh_size = total_gen_size;
30508 #endif //BACKGROUND_GC
30510 dd_promoted_size (dd) = out;
30514 void gc_heap::trim_youngest_desired_low_memory()
30516 if (g_low_memory_status)
30518 size_t committed_mem = 0;
30519 heap_segment* seg = generation_start_segment (generation_of (max_generation));
30522 committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
30523 seg = heap_segment_next (seg);
30525 seg = generation_start_segment (generation_of (max_generation + 1));
30528 committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
30529 seg = heap_segment_next (seg);
30532 dynamic_data* dd = dynamic_data_of (0);
30533 size_t current = dd_desired_allocation (dd);
30534 size_t candidate = max (Align ((committed_mem / 10), get_alignment_constant(FALSE)), dd_min_size (dd));
30536 dd_desired_allocation (dd) = min (current, candidate);
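// Worked example (illustrative): under low memory, with 400MB committed across the
// gen2 and LOH segments, candidate = Align (40MB) (never below gen0's min size), so
// the gen0 budget is capped near 10% of what the heap already has committed.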
30540 void gc_heap::decommit_ephemeral_segment_pages()
30542 if (settings.concurrent)
30547 size_t slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
30549 dynamic_data* dd = dynamic_data_of (0);
30551 #ifndef MULTIPLE_HEAPS
30552 size_t extra_space = (g_low_memory_status ? 0 : (512 * 1024));
30553 size_t decommit_timeout = (g_low_memory_status ? 0 : GC_EPHEMERAL_DECOMMIT_TIMEOUT);
30554 size_t ephemeral_elapsed = dd_time_clock(dd) - gc_last_ephemeral_decommit_time;
30556 if (dd_desired_allocation (dd) > gc_gen0_desired_high)
30558 gc_gen0_desired_high = dd_desired_allocation (dd) + extra_space;
30561 if (ephemeral_elapsed >= decommit_timeout)
30563 slack_space = min (slack_space, gc_gen0_desired_high);
30565 gc_last_ephemeral_decommit_time = dd_time_clock(dd);
30566 gc_gen0_desired_high = 0;
30568 #endif //!MULTIPLE_HEAPS
30570 if (settings.condemned_generation >= (max_generation-1))
30572 size_t new_slack_space =
30574 max(min(min(soh_segment_size/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd));
30576 #ifdef FEATURE_CORECLR
30577 dd_desired_allocation (dd);
30580 #endif //FEATURE_CORECLR
30583 slack_space = min (slack_space, new_slack_space);
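// Illustrative reading of new_slack_space above: the retained committed tail is
// roughly min (soh_segment_size/32, dd_max_size, gen2_size/10) but never below the
// gen0 desired allocation, so small heaps keep little slack while heaps with a
// large gen2 keep proportionally more to absorb the next round of allocation.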
30586 decommit_heap_segment_pages (ephemeral_heap_segment, slack_space);
30588 gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
30589 current_gc_data_per_heap->extra_gen0_committed = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
30592 //This is meant to be called by decide_on_compacting.
30594 size_t gc_heap::generation_fragmentation (generation* gen,
30595 generation* consing_gen,
30599 uint8_t* alloc = generation_allocation_pointer (consing_gen);
30600 // If the allocation pointer has reached the ephemeral segment
30601 // fine, otherwise the whole ephemeral segment is considered
30603 if (in_range_for_segment (alloc, ephemeral_heap_segment))
30605 if (alloc <= heap_segment_allocated(ephemeral_heap_segment))
30606 frag = end - alloc;
30609 // case when no survivors, allocated set to beginning
30612 dprintf (3, ("ephemeral frag: %Id", frag));
30615 frag = (heap_segment_allocated (ephemeral_heap_segment) -
30616 heap_segment_mem (ephemeral_heap_segment));
30617 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
30619 PREFIX_ASSUME(seg != NULL);
30621 while (seg != ephemeral_heap_segment)
30623 frag += (heap_segment_allocated (seg) -
30624 heap_segment_plan_allocated (seg));
30625 dprintf (3, ("seg: %Ix, frag: %Id", (size_t)seg,
30626 (heap_segment_allocated (seg) -
30627 heap_segment_plan_allocated (seg))));
30629 seg = heap_segment_next_rw (seg);
30632 dprintf (3, ("frag: %Id discounting pinned plugs", frag));
30633 //add the length of the dequeued plug free space
30635 while (bos < mark_stack_bos)
30637 frag += (pinned_len (pinned_plug_of (bos)));
30644 // for SOH this returns the total sizes of the generation and its
30645 // younger generation(s).
30646 // for LOH this returns just LOH size.
30647 size_t gc_heap::generation_sizes (generation* gen)
30650 if (generation_start_segment (gen ) == ephemeral_heap_segment)
30651 result = (heap_segment_allocated (ephemeral_heap_segment) -
30652 generation_allocation_start (gen));
30655 heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
30657 PREFIX_ASSUME(seg != NULL);
30661 result += (heap_segment_allocated (seg) -
30662 heap_segment_mem (seg));
30663 seg = heap_segment_next_in_range (seg);
30670 size_t gc_heap::estimated_reclaim (int gen_number)
30672 dynamic_data* dd = dynamic_data_of (gen_number);
30673 size_t gen_allocated = (dd_desired_allocation (dd) - dd_new_allocation (dd));
30674 size_t gen_total_size = gen_allocated + dd_current_size (dd);
30675 size_t est_gen_surv = (size_t)((float) (gen_total_size) * dd_surv (dd));
30676 size_t est_gen_free = gen_total_size - est_gen_surv + dd_fragmentation (dd);
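// Worked example (illustrative): if the generation holds 100MB (budget consumed plus
// current size), dd_surv is 30% and fragmentation is 10MB, the estimate is
// 100 - 30 + 10 = 80MB reclaimable by condemning this generation.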
30678 dprintf (GTC_LOG, ("h%d gen%d total size: %Id, est dead space: %Id (s: %d, allocated: %Id), frag: %Id",
30679 heap_number, gen_number,
30682 (int)(dd_surv (dd) * 100),
30684 dd_fragmentation (dd)));
30686 return est_gen_free;
30689 BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
30690 size_t fragmentation,
30691 BOOL& should_expand)
30693 BOOL should_compact = FALSE;
30694 should_expand = FALSE;
30695 generation* gen = generation_of (condemned_gen_number);
30696 dynamic_data* dd = dynamic_data_of (condemned_gen_number);
30697 size_t gen_sizes = generation_sizes(gen);
30698 float fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) :
30699 (float (fragmentation) / gen_sizes) );
30701 dprintf (GTC_LOG, ("h%d g%d fragmentation: %Id (%d%%)",
30702 heap_number, settings.condemned_generation,
30703 fragmentation, (int)(fragmentation_burden * 100.0)));
30706 // for pure GC stress runs we need compaction, for GC stress "mix"
30707 // we need to ensure a better mix of compacting and sweeping collections
30708 if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent
30709 && !g_pConfig->IsGCStressMix())
30710 should_compact = TRUE;
30713 // in GC stress "mix" mode, for stress induced collections make sure we
30714 // keep sweeps and compactions relatively balanced. do not (yet) force sweeps
30715 // against the GC's determination, as it may lead to premature OOMs.
30716 if (g_pConfig->IsGCStressMix() && settings.stress_induced)
30718 int compactions = g_GCStatistics.cntCompactFGC+g_GCStatistics.cntCompactNGC;
30719 int sweeps = g_GCStatistics.cntFGC + g_GCStatistics.cntNGC - compactions;
30720 if (compactions < sweeps / 10)
30722 should_compact = TRUE;
30726 #endif //STRESS_HEAP
30728 if (GCConfig::GetForceCompact())
30729 should_compact = TRUE;
30731 if ((condemned_gen_number == max_generation) && last_gc_before_oom)
30733 should_compact = TRUE;
30734 last_gc_before_oom = FALSE;
30735 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc);
30738 if (settings.reason == reason_induced_compacting)
30740 dprintf (2, ("induced compacting GC"));
30741 should_compact = TRUE;
30742 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting);
30745 if (settings.reason == reason_pm_full_gc)
30747 assert (condemned_gen_number == max_generation);
30748 if (heap_number == 0)
30750 dprintf (GTC_LOG, ("PM doing compacting full GC after a gen1"));
30752 should_compact = TRUE;
30755 dprintf (2, ("Fragmentation: %d Fragmentation burden %d%%",
30756 fragmentation, (int) (100*fragmentation_burden)));
30758 if (provisional_mode_triggered && (condemned_gen_number == (max_generation - 1)))
30760 dprintf (GTC_LOG, ("gen1 in PM always compact"));
30761 should_compact = TRUE;
30764 if (!should_compact)
30766 if (dt_low_ephemeral_space_p (tuning_deciding_compaction))
30768 dprintf(GTC_LOG, ("compacting due to low ephemeral"));
30769 should_compact = TRUE;
30770 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral);
30774 if (should_compact)
30776 if ((condemned_gen_number >= (max_generation - 1)))
30778 if (dt_low_ephemeral_space_p (tuning_deciding_expansion))
30780 dprintf (GTC_LOG,("Not enough space for all ephemeral generations with compaction"));
30781 should_expand = TRUE;
30787 BOOL high_memory = FALSE;
30790 if (!should_compact)
30792 // We are not putting this in dt_high_frag_p because it's not exactly
30793 // high fragmentation - it's just enough planned fragmentation for us to
30794 // want to compact. Also the "fragmentation" we are talking about here
30795 // is different from anywhere else.
30796 BOOL frag_exceeded = ((fragmentation >= dd_fragmentation_limit (dd)) &&
30797 (fragmentation_burden >= dd_fragmentation_burden_limit (dd)));
30801 #ifdef BACKGROUND_GC
30802 // do not force compaction if this was a stress-induced GC
30803 IN_STRESS_HEAP(if (!settings.stress_induced))
30805 #endif // BACKGROUND_GC
30806 assert (settings.concurrent == FALSE);
30807 should_compact = TRUE;
30808 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag);
30809 #ifdef BACKGROUND_GC
30811 #endif // BACKGROUND_GC
30815 // check for high memory situation
30816 if(!should_compact)
30818 uint32_t num_heaps = 1;
30819 #ifdef MULTIPLE_HEAPS
30820 num_heaps = gc_heap::n_heaps;
30821 #endif // MULTIPLE_HEAPS
30823 ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation);
30825 if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th))
30827 if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (entry_available_physical_mem, num_heaps)))
30829 dprintf(GTC_LOG,("compacting due to fragmentation in high memory"));
30830 should_compact = TRUE;
30831 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag);
30833 high_memory = TRUE;
30835 else if(settings.entry_memory_load >= v_high_memory_load_th)
30837 if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps)))
30839 dprintf(GTC_LOG,("compacting due to fragmentation in very high memory"));
30840 should_compact = TRUE;
30841 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag);
30843 high_memory = TRUE;
30849 // The purpose of calling ensure_gap_allocation here is to make sure
30850 // that we actually are able to commit the memory to allocate generation
30852 if ((should_compact == FALSE) &&
30853 (ensure_gap_allocation (condemned_gen_number) == FALSE))
30855 should_compact = TRUE;
30856 get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps);
30859 if (settings.condemned_generation == max_generation)
30861 //check the progress
30864 (high_memory && !should_compact) ||
30866 (generation_plan_allocation_start (generation_of (max_generation - 1)) >=
30867 generation_allocation_start (generation_of (max_generation - 1))))
30869 dprintf (1, ("gen1 start %Ix->%Ix, gen2 size %Id->%Id, lock elevation",
30870 generation_allocation_start (generation_of (max_generation - 1)),
30871 generation_plan_allocation_start (generation_of (max_generation - 1)),
30872 generation_size (max_generation),
30873 generation_plan_size (max_generation)));
30874 //no progress -> lock
30875 settings.should_lock_elevation = TRUE;
30879 if (settings.pause_mode == pause_no_gc)
30881 should_compact = TRUE;
30882 if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_plan_allocated (ephemeral_heap_segment))
30883 < soh_allocation_no_gc)
30885 should_expand = TRUE;
30889 dprintf (2, ("will %s(%s)", (should_compact ? "compact" : "sweep"), (should_expand ? "ex" : "")));
30890 return should_compact;
30893 size_t align_lower_good_size_allocation (size_t size)
30895 return (size/64)*64;
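// Worked example: align_lower_good_size_allocation (1000) = 960; sizes are rounded
// down to a 64-byte multiple, presumably to keep end-of-segment estimates conservative.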
30898 size_t gc_heap::approximate_new_allocation()
30900 dynamic_data* dd0 = dynamic_data_of (0);
30901 return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3));
30904 BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end, size_t end_space_required, gc_tuning_point tp)
30906 BOOL can_fit = FALSE;
30907 size_t end_seg_space = (size_t)(seg_end - start);
30908 if (end_seg_space > end_space_required)
30910 // If a hard limit is specified, attribute all that's left in the commit to the
30911 // ephemeral seg, treat that as the segment end, and check whether we have enough space.
30912 if (heap_hard_limit)
30914 size_t left_in_commit = heap_hard_limit - current_total_committed;
30916 #ifdef MULTIPLE_HEAPS
30917 num_heaps = n_heaps;
30918 #endif //MULTIPLE_HEAPS
30919 left_in_commit /= num_heaps;
30920 if (left_in_commit > end_space_required)
30925 dprintf (2, ("h%d end seg %Id, but only %Id left in HARD LIMIT commit, required: %Id %s on eph (%d)",
30926 heap_number, end_seg_space,
30927 left_in_commit, end_space_required,
30928 (can_fit ? "ok" : "short"), (int)tp));
30937 // After we did a GC we expect to have at least this
30938 // much space at the end of the segment to satisfy
30939 // a reasonable amount of allocation requests.
30940 size_t gc_heap::end_space_after_gc()
30942 return max ((dd_min_size (dynamic_data_of (0))/2), (END_SPACE_AFTER_GC + Align (min_obj_size)));
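// Illustrative reading: e.g., with a 256KB gen0 min size this returns at least 128KB,
// and never less than END_SPACE_AFTER_GC plus one minimally-sized object, so the
// segment tail left after a GC can satisfy a reasonable run of small allocations.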
30945 BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
30947 uint8_t* start = 0;
30949 if ((tp == tuning_deciding_condemned_gen) ||
30950 (tp == tuning_deciding_compaction))
30952 start = (settings.concurrent ? alloc_allocated : heap_segment_allocated (ephemeral_heap_segment));
30953 if (settings.concurrent)
30955 dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (alloc_allocated)",
30956 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
30960 dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (allocated)",
30961 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment))));
30964 else if (tp == tuning_deciding_expansion)
30966 start = heap_segment_plan_allocated (ephemeral_heap_segment);
30967 dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment based on plan",
30968 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - start)));
30972 assert (tp == tuning_deciding_full_gc);
30973 dprintf (GTC_LOG, ("FGC: %Id left at the end of ephemeral segment (alloc_allocated)",
30974 (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
30975 start = alloc_allocated;
30978 if (start == 0) // empty ephemeral generations
30980 assert (tp == tuning_deciding_expansion);
30981 // if there are no survivors in the ephemeral segment,
30982 // this should be the beginning of the ephemeral segment.
30983 start = generation_allocation_pointer (generation_of (max_generation));
30984 assert (start == heap_segment_mem (ephemeral_heap_segment));
30987 if (tp == tuning_deciding_expansion)
30989 assert (settings.condemned_generation >= (max_generation-1));
30990 size_t gen0size = approximate_new_allocation();
30991 size_t eph_size = gen0size;
30992 size_t gen_min_sizes = 0;
30994 for (int j = 1; j <= max_generation-1; j++)
30996 gen_min_sizes += 2*dd_min_size (dynamic_data_of(j));
30999 eph_size += gen_min_sizes;
31001 dprintf (3, ("h%d deciding on expansion, need %Id (gen0: %Id, 2*min: %Id)",
31002 heap_number, gen0size, gen_min_sizes, eph_size));
31004 // We must find room for one large object and enough room for gen0size
31005 if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > eph_size)
31007 dprintf (3, ("Enough room before end of segment"));
31012 size_t room = align_lower_good_size_allocation
31013 (heap_segment_reserved (ephemeral_heap_segment) - start);
31014 size_t end_seg = room;
31016 //look at the plug free space
31017 size_t largest_alloc = END_SPACE_AFTER_GC + Align (min_obj_size);
31018 bool large_chunk_found = FALSE;
31020 uint8_t* gen0start = generation_plan_allocation_start (youngest_generation);
31021 dprintf (3, ("ephemeral_gen_fit_p: gen0 plan start: %Ix", (size_t)gen0start));
31022 if (gen0start == 0)
31024 dprintf (3, ("ephemeral_gen_fit_p: room before free list search %Id, needed: %Id",
31026 while ((bos < mark_stack_bos) &&
31027 !((room >= gen0size) && large_chunk_found))
31029 uint8_t* plug = pinned_plug (pinned_plug_of (bos));
31030 if (in_range_for_segment (plug, ephemeral_heap_segment))
31032 if (plug >= gen0start)
31034 size_t chunk = align_lower_good_size_allocation (pinned_len (pinned_plug_of (bos)));
31036 if (!large_chunk_found)
31038 large_chunk_found = (chunk >= largest_alloc);
31040 dprintf (3, ("ephemeral_gen_fit_p: room now %Id, large chunk: %Id",
31041 room, large_chunk_found));
31047 if (room >= gen0size)
31049 if (large_chunk_found)
31051 sufficient_gen0_space_p = TRUE;
31053 dprintf (3, ("Enough room"));
31058 // now we need to find largest_alloc at the end of the segment.
31059 if (end_seg >= end_space_after_gc())
31061 dprintf (3, ("Enough room (may need end of seg)"));
31067 dprintf (3, ("Not enough room"));
31073 size_t end_space = 0;
31074 dynamic_data* dd = dynamic_data_of (0);
31075 if ((tp == tuning_deciding_condemned_gen) ||
31076 (tp == tuning_deciding_full_gc))
31078 end_space = max (2*dd_min_size (dd), end_space_after_gc());
31082 assert (tp == tuning_deciding_compaction);
31083 end_space = approximate_new_allocation();
31086 BOOL can_fit = sufficient_space_end_seg (start, heap_segment_reserved (ephemeral_heap_segment), end_space, tp);
31092 CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_bytes)
31094 //create a new alloc context because gen3context is shared.
31095 alloc_context acontext;
31096 acontext.alloc_ptr = 0;
31097 acontext.alloc_limit = 0;
31098 acontext.alloc_bytes = 0;
31099 #ifdef MULTIPLE_HEAPS
31100 acontext.set_alloc_heap(vm_heap);
31101 #endif //MULTIPLE_HEAPS
31104 size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size));
31106 size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size));
31109 if (jsize >= maxObjectSize)
31111 if (GCConfig::GetBreakOnOOM())
31113 GCToOSInterface::DebugBreak();
31118 size_t size = AlignQword (jsize);
31119 int align_const = get_alignment_constant (FALSE);
31120 #ifdef FEATURE_LOH_COMPACTION
31121 size_t pad = Align (loh_padding_obj_size, align_const);
31124 #endif //FEATURE_LOH_COMPACTION
31126 assert (size >= Align (min_obj_size, align_const));
31128 #pragma inline_depth(0)
31130 if (! allocate_more_space (&acontext, (size + pad), max_generation+1))
31136 #pragma inline_depth(20)
31140 uint8_t* current_lowest_address = lowest_address;
31141 uint8_t* current_highest_address = highest_address;
31142 #ifdef BACKGROUND_GC
31143 if (recursive_gc_sync::background_running_p())
31145 current_lowest_address = background_saved_lowest_address;
31146 current_highest_address = background_saved_highest_address;
31148 #endif //BACKGROUND_GC
31149 #endif // MARK_ARRAY
31151 #ifdef FEATURE_LOH_COMPACTION
31152 // The GC allocator made a free object already in this alloc context and
31153 // adjusted the alloc_ptr accordingly.
31154 #endif //FEATURE_LOH_COMPACTION
31156 uint8_t* result = acontext.alloc_ptr;
31158 assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size);
31159 alloc_bytes += size;
31161 CObjectHeader* obj = (CObjectHeader*)result;
31164 if (recursive_gc_sync::background_running_p())
31166 if ((result < current_highest_address) && (result >= current_lowest_address))
31168 dprintf (3, ("Clearing mark bit at address %Ix",
31169 (size_t)(&mark_array [mark_word_of (result)])));
31171 mark_array_clear_marked (result);
31173 #ifdef BACKGROUND_GC
31174 //the object has to cover one full mark uint32_t
31175 assert (size > mark_word_size);
31176 if (current_c_gc_state != c_gc_state_free)
31178 dprintf (3, ("Concurrent allocation of a large object %Ix",
31180 //mark the new block specially so we know it is a new object
31181 if ((result < current_highest_address) && (result >= current_lowest_address))
31183 dprintf (3, ("Setting mark bit at address %Ix",
31184 (size_t)(&mark_array [mark_word_of (result)])));
31186 mark_array_set_marked (result);
31189 #endif //BACKGROUND_GC
31191 #endif //MARK_ARRAY
31194 assert ((size_t)obj == Align ((size_t)obj, align_const));
31199 void reset_memory (uint8_t* o, size_t sizeo)
31201 if (sizeo > 128 * 1024)
31203 // We cannot reset the memory for the useful part of a free object.
31204 size_t size_to_skip = min_free_list - plug_skew;
31206 size_t page_start = align_on_page ((size_t)(o + size_to_skip));
31207 size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start;
31208 // Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail
31209 // on write watched memory.
31212 #ifdef MULTIPLE_HEAPS
31213 bool unlock_p = true;
31215 // We don't do unlock because there could be many processes using workstation GC and it's
31216 // bad perf to have many threads doing unlock at the same time.
31217 bool unlock_p = false;
31218 #endif //MULTIPLE_HEAPS
31220 reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, unlock_p);
31225 void gc_heap::reset_large_object (uint8_t* o)
31227 // If it's a large object, allow the O/S to discard the backing store for these pages.
31228 reset_memory (o, size(o));
31231 BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
31234 // It shouldn't be necessary to do these comparisons because this is only used for blocking
31235 // GCs and LOH segments cannot be out of range.
31236 if ((o >= lowest_address) && (o < highest_address))
31256 void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn)
31258 // Now walk the portion of memory that is actually being relocated.
31259 walk_relocation (profiling_context, fn);
31261 #ifdef FEATURE_LOH_COMPACTION
31262 if (loh_compacted_p)
31264 walk_relocation_for_loh (profiling_context, fn);
31266 #endif //FEATURE_LOH_COMPACTION
31269 void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn)
31271 generation* gen = large_object_generation;
31272 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
31274 PREFIX_ASSUME(seg != NULL);
31276 uint8_t* o = generation_allocation_start (gen);
31277 uint8_t* plug_end = o;
31278 uint8_t* plug_start = o;
31282 if (o >= heap_segment_allocated (seg))
31284 seg = heap_segment_next (seg);
31288 o = heap_segment_mem (seg);
31290 if (large_object_marked(o, FALSE))
31297 o = o + AlignQword (size (o));
31298 if (o >= heap_segment_allocated (seg))
31302 m = large_object_marked (o, FALSE);
31307 fn (plug_start, plug_end, 0, profiling_context, false, false);
31311 while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
31313 o = o + AlignQword (size (o));
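// Editor's note: walk_survivors_for_loh above is one instance of the plug/gap
// iteration pattern that all of the sweep-style walks in this file share. A
// minimal sketch of the pattern (names here are illustrative, not the actual
// helpers):
//
//   uint8_t* o = first_object;
//   while (o < end)
//   {
//       while ((o < end) && !marked (o))        // skip the gap
//           o += aligned_size (o);
//       uint8_t* plug_start = o;                // first marked object
//       while ((o < end) && marked (o))         // accumulate the plug
//           o += aligned_size (o);
//       uint8_t* plug_end = o;
//       report (plug_start, plug_end);          // fn() here; thread_gap() in the sweeps
//   }
//
// The real loops look more tangled only because they are unrolled across
// segment boundaries and interleave the segment-advance logic.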
31319 #ifdef BACKGROUND_GC
31321 BOOL gc_heap::background_object_marked (uint8_t* o, BOOL clearp)
31324 if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
31326 if (mark_array_marked (o))
31330 mark_array_clear_marked (o);
31331 //dprintf (3, ("mark array bit for object %Ix is cleared", o));
31332 dprintf (3, ("CM: %Ix", o));
31342 dprintf (3, ("o %Ix(%d) %s", o, size(o), (m ? "was bm" : "was NOT bm")));
31346 void gc_heap::background_delay_delete_loh_segments()
31348 generation* gen = large_object_generation;
31349 heap_segment* seg = heap_segment_rw (generation_start_segment (large_object_generation));
31350 heap_segment* prev_seg = 0;
31354 heap_segment* next_seg = heap_segment_next (seg);
31355 if (seg->flags & heap_segment_flags_loh_delete)
31357 dprintf (3, ("deleting %Ix-%Ix-%Ix", (size_t)seg, heap_segment_allocated (seg), heap_segment_reserved (seg)));
31358 delete_heap_segment (seg, (GCConfig::GetRetainVM() != 0));
31359 heap_segment_next (prev_seg) = next_seg;
31370 uint8_t* gc_heap::background_next_end (heap_segment* seg, BOOL large_objects_p)
31373 (large_objects_p ? heap_segment_allocated (seg) : heap_segment_background_allocated (seg));
31376 void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b)
31381 if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
31382 !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL))
31384 dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end));
31385 memset (start, b, (end - start));
31388 #endif //VERIFY_HEAP
31391 void gc_heap::generation_delete_heap_segment (generation* gen,
31393 heap_segment* prev_seg,
31394 heap_segment* next_seg)
31396 dprintf (3, ("bgc sweep: deleting seg %Ix", seg));
31397 if (gen == large_object_generation)
31399 dprintf (3, ("Preparing empty large segment %Ix for deletion", (size_t)seg));
31401 // We cannot thread segs in here onto freeable_large_heap_segment because
31402 // grow_brick_card_tables could be committing the mark array, which needs to
31403 // read the seg list. So we delay the deletion until the next time we suspend the EE.
31404 seg->flags |= heap_segment_flags_loh_delete;
31405 // Since we will be decommitting the seg, we need to prevent heap verification
31406 // from verifying this segment.
31407 heap_segment_allocated (seg) = heap_segment_mem (seg);
31411 if (seg == ephemeral_heap_segment)
31416 heap_segment_next (next_seg) = prev_seg;
31418 dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg));
31419 heap_segment_next (seg) = freeable_small_heap_segment;
31420 freeable_small_heap_segment = seg;
31423 decommit_heap_segment (seg);
31424 seg->flags |= heap_segment_flags_decommitted;
31426 set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
31429 void gc_heap::process_background_segment_end (heap_segment* seg,
31431 uint8_t* last_plug_end,
31432 heap_segment* start_seg,
31436 uint8_t* allocated = heap_segment_allocated (seg);
31437 uint8_t* background_allocated = heap_segment_background_allocated (seg);
31438 BOOL loh_p = heap_segment_loh_p (seg);
31440 dprintf (3, ("Processing end of background segment [%Ix, %Ix[(%Ix[)",
31441 (size_t)heap_segment_mem (seg), background_allocated, allocated));
31443 if (!loh_p && (allocated != background_allocated))
31445 assert (gen != large_object_generation);
31447 dprintf (3, ("Make a free object before newly promoted objects [%Ix, %Ix[",
31448 (size_t)last_plug_end, background_allocated));
31449 thread_gap (last_plug_end, background_allocated - last_plug_end, generation_of (max_generation));
31452 fix_brick_to_highest (last_plug_end, background_allocated);
31454 // While we allowed FGCs during our walk through the gaps, we could have erased
31455 // the brick that corresponds to bgc_allocated, because we had to update the brick
31456 // there; recover it here.
31457 fix_brick_to_highest (background_allocated, background_allocated);
31461 // by default, if allocated == background_allocated, it can't
31462 // be the ephemeral segment.
31463 if (seg == ephemeral_heap_segment)
31468 if (allocated == heap_segment_mem (seg))
31470 // this can happen with LOH segments when multiple threads
31471 // allocate new segments and not all of them were needed to
31472 // satisfy allocation requests.
31473 assert (gen == large_object_generation);
31476 if (last_plug_end == heap_segment_mem (seg))
31478 dprintf (3, ("Segment allocated is %Ix (beginning of this seg) - %s be deleted",
31479 (size_t)allocated, (*delete_p ? "should" : "should not")));
31481 if (seg != start_seg)
31488 dprintf (3, ("Trimming seg to %Ix[", (size_t)last_plug_end));
31489 heap_segment_allocated (seg) = last_plug_end;
31490 set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
31492 decommit_heap_segment_pages (seg, 0);
31496 dprintf (3, ("verifying seg %Ix's mark array was completely cleared", seg));
31497 bgc_verify_mark_array_cleared (seg);
31500 void gc_heap::process_n_background_segments (heap_segment* seg,
31501 heap_segment* prev_seg,
31504 assert (gen != large_object_generation);
31508 dprintf (2, ("processing seg %Ix (not seen by bgc mark)", seg));
31509 heap_segment* next_seg = heap_segment_next (seg);
31511 if (heap_segment_read_only_p (seg))
31517 if (heap_segment_allocated (seg) == heap_segment_mem (seg))
31519 // This can happen - if we have a LOH segment where nothing survived
31520 // or a SOH segment allocated by a gen1 GC while BGC was in progress,
31521 // where nothing survived the last gen1 GC.
31522 generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
31530 verify_soh_segment_list();
31536 BOOL gc_heap::fgc_should_consider_object (uint8_t* o,
31538 BOOL consider_bgc_mark_p,
31539 BOOL check_current_sweep_p,
31540 BOOL check_saved_sweep_p)
31542 // the logic for this function must be kept in sync with the analogous function
31543 // in ToolBox\SOS\Strike\gc.cpp
31545 // TRUE means we don't need to check the bgc mark bit
31546 // FALSE means we do.
31547 BOOL no_bgc_mark_p = FALSE;
31549 if (consider_bgc_mark_p)
31551 if (check_current_sweep_p && (o < current_sweep_pos))
31553 dprintf (3, ("no bgc mark - o: %Ix < cs: %Ix", o, current_sweep_pos));
31554 no_bgc_mark_p = TRUE;
31557 if (!no_bgc_mark_p)
31559 if(check_saved_sweep_p && (o >= saved_sweep_ephemeral_start))
31561 dprintf (3, ("no bgc mark - o: %Ix >= ss: %Ix", o, saved_sweep_ephemeral_start));
31562 no_bgc_mark_p = TRUE;
31565 if (!check_saved_sweep_p)
31567 uint8_t* background_allocated = heap_segment_background_allocated (seg);
31568 // if this was the saved ephemeral segment, check_saved_sweep_p
31569 // would've been true.
31570 assert (heap_segment_background_allocated (seg) != saved_sweep_ephemeral_start);
31571 // background_allocated could be 0 for the new segments acquired during bgc
31572 // sweep and we still want no_bgc_mark_p to be true.
31573 if (o >= background_allocated)
31575 dprintf (3, ("no bgc mark - o: %Ix >= ba: %Ix", o, background_allocated));
31576 no_bgc_mark_p = TRUE;
31583 no_bgc_mark_p = TRUE;
31586 dprintf (3, ("bgc mark %Ix: %s (bm: %s)", o, (no_bgc_mark_p ? "no" : "yes"), (background_object_marked (o, FALSE) ? "yes" : "no")));
31587 return (no_bgc_mark_p ? TRUE : background_object_marked (o, FALSE));
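// Editor's note: a compact summary of the decision above, derived from the code
// (a sketch, not an authoritative spec):
//
//   consider_bgc_mark_p == FALSE                          -> live, no bit check
//   check_current_sweep_p && (o < current_sweep_pos)      -> already swept, live
//   check_saved_sweep_p && (o >= saved_sweep_ephemeral_start)
//                                                         -> ephemeral, already swept, live
//   !check_saved_sweep_p && (o >= background_allocated)   -> allocated after bgc mark, live
//   otherwise                                             -> live iff the bgc mark bit is set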
31590 // consider_bgc_mark_p tells you if you need to care about the bgc mark bit at all
31591 // if it's TRUE, check_current_sweep_p tells you if you should consider the
31592 // current sweep position or not.
31593 void gc_heap::should_check_bgc_mark (heap_segment* seg,
31594 BOOL* consider_bgc_mark_p,
31595 BOOL* check_current_sweep_p,
31596 BOOL* check_saved_sweep_p)
31598 // the logic for this function must be kept in sync with the analogous function
31599 // in ToolBox\SOS\Strike\gc.cpp
31600 *consider_bgc_mark_p = FALSE;
31601 *check_current_sweep_p = FALSE;
31602 *check_saved_sweep_p = FALSE;
31604 if (current_c_gc_state == c_gc_state_planning)
31606 // We are doing the current_sweep_pos comparison here because we have yet to
31607 // turn on the swept flag for the segment but in_range_for_segment will return
31608 // FALSE if the address is the same as reserved.
31609 if ((seg->flags & heap_segment_flags_swept) || (current_sweep_pos == heap_segment_reserved (seg)))
31611 dprintf (3, ("seg %Ix is already swept by bgc", seg));
31615 *consider_bgc_mark_p = TRUE;
31617 dprintf (3, ("seg %Ix hasn't been swept by bgc", seg));
31619 if (seg == saved_sweep_ephemeral_seg)
31621 dprintf (3, ("seg %Ix is the saved ephemeral seg", seg));
31622 *check_saved_sweep_p = TRUE;
31625 if (in_range_for_segment (current_sweep_pos, seg))
31627 dprintf (3, ("current sweep pos is %Ix and within seg %Ix",
31628 current_sweep_pos, seg));
31629 *check_current_sweep_p = TRUE;
31635 void gc_heap::background_ephemeral_sweep()
31637 dprintf (3, ("bgc ephemeral sweep"));
31639 int align_const = get_alignment_constant (TRUE);
31641 saved_sweep_ephemeral_seg = ephemeral_heap_segment;
31642 saved_sweep_ephemeral_start = generation_allocation_start (generation_of (max_generation - 1));
31644 // Since we don't want to interfere with gen0 allocation while we are threading the gen0
31645 // free list, we thread onto a local list first, then publish it when we are done.
31646 allocator youngest_free_list;
31647 size_t youngest_free_list_space = 0;
31648 size_t youngest_free_obj_space = 0;
31650 youngest_free_list.clear();
31652 for (int i = 0; i <= (max_generation - 1); i++)
31654 generation* gen_to_reset = generation_of (i);
31655 assert (generation_free_list_space (gen_to_reset) == 0);
31656 // Can only assert free_list_space is 0, not free_obj_space as the allocator could have added
31657 // something there.
31660 for (int i = (max_generation - 1); i >= 0; i--)
31662 generation* current_gen = generation_of (i);
31663 uint8_t* o = generation_allocation_start (current_gen);
31664 //Skip the generation gap object
31665 o = o + Align(size (o), align_const);
31666 uint8_t* end = ((i > 0) ?
31667 generation_allocation_start (generation_of (i - 1)) :
31668 heap_segment_allocated (ephemeral_heap_segment));
31670 uint8_t* plug_end = o;
31671 uint8_t* plug_start = o;
31672 BOOL marked_p = FALSE;
31676 marked_p = background_object_marked (o, TRUE);
31680 size_t plug_size = plug_start - plug_end;
31684 thread_gap (plug_end, plug_size, current_gen);
31690 make_unused_array (plug_end, plug_size);
31691 if (plug_size >= min_free_list)
31693 youngest_free_list_space += plug_size;
31694 youngest_free_list.thread_item (plug_end, plug_size);
31698 youngest_free_obj_space += plug_size;
31703 fix_brick_to_highest (plug_end, plug_start);
31704 fix_brick_to_highest (plug_start, plug_start);
31709 o = o + Align (size (o), align_const);
31715 m = background_object_marked (o, TRUE);
31718 dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
31722 while ((o < end) && !background_object_marked (o, FALSE))
31724 o = o + Align (size (o), align_const);
31729 if (plug_end != end)
31733 thread_gap (plug_end, end - plug_end, current_gen);
31734 fix_brick_to_highest (plug_end, end);
31738 heap_segment_allocated (ephemeral_heap_segment) = plug_end;
31739 // the following line is temporary.
31740 heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end;
31742 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
31744 make_unused_array (plug_end, (end - plug_end));
31746 #endif //VERIFY_HEAP
31750 dd_fragmentation (dynamic_data_of (i)) =
31751 generation_free_list_space (current_gen) + generation_free_obj_space (current_gen);
31754 generation* youngest_gen = generation_of (0);
31755 generation_free_list_space (youngest_gen) = youngest_free_list_space;
31756 generation_free_obj_space (youngest_gen) = youngest_free_obj_space;
31757 dd_fragmentation (dynamic_data_of (0)) = youngest_free_list_space + youngest_free_obj_space;
31758 generation_allocator (youngest_gen)->copy_with_no_repair (&youngest_free_list);
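// Editor's note: an outline of background_sweep below, summarized from the code
// rather than from any spec:
//   1. reset every generation's allocator and free-list/free-obj accounting so
//      foreground GCs can allocate into the older generations during sweep;
//   2. restart the EE, then run background_ephemeral_sweep (above) for the
//      ephemeral generations;
//   3. sweep SOH segments backwards from the ephemeral segment, periodically
//      republishing current_sweep_pos/next_sweep_obj (every num_objs objects)
//      so concurrent readers observe forward progress;
//   4. take more_space_lock_loh, wait out in-flight LOH allocators, then sweep LOH;
//   5. recompute dynamic data and flip current_c_gc_state to c_gc_state_free.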
31761 void gc_heap::background_sweep()
31763 generation* gen = generation_of (max_generation);
31764 dynamic_data* dd = dynamic_data_of (max_generation);
31765 // For SOH segments we go backwards.
31766 heap_segment* start_seg = ephemeral_heap_segment;
31767 PREFIX_ASSUME(start_seg != NULL);
31768 heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
31769 heap_segment* seg = start_seg;
31770 uint8_t* o = heap_segment_mem (seg);
31772 heap_segment* prev_seg = heap_segment_next (seg);
31773 int align_const = get_alignment_constant (TRUE);
31776 assert (o == generation_allocation_start (generation_of (max_generation)));
31777 o = o + Align(size (o), align_const);
31780 uint8_t* plug_end = o;
31781 uint8_t* plug_start = o;
31782 next_sweep_obj = o;
31783 current_sweep_pos = o;
31785 //uint8_t* end = background_next_end (seg, (gen == large_object_generation));
31786 uint8_t* end = heap_segment_background_allocated (seg);
31787 BOOL delete_p = FALSE;
31789 //concurrent_print_time_delta ("finished with mark and start with sweep");
31790 concurrent_print_time_delta ("Sw");
31791 dprintf (2, ("---- (GC%d)Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
31793 //block concurrent allocation for large objects
31794 dprintf (3, ("lh state: planning"));
31795 if (gc_lh_block_event.IsValid())
31797 gc_lh_block_event.Reset();
31800 for (int i = 0; i <= (max_generation + 1); i++)
31802 generation* gen_to_reset = generation_of (i);
31803 generation_allocator (gen_to_reset)->clear();
31804 generation_free_list_space (gen_to_reset) = 0;
31805 generation_free_obj_space (gen_to_reset) = 0;
31806 generation_free_list_allocated (gen_to_reset) = 0;
31807 generation_end_seg_allocated (gen_to_reset) = 0;
31808 generation_condemned_allocated (gen_to_reset) = 0;
31809 //reset the allocation so foreground gc can allocate into the older generations
31810 generation_allocation_pointer (gen_to_reset)= 0;
31811 generation_allocation_limit (gen_to_reset) = 0;
31812 generation_allocation_segment (gen_to_reset) = heap_segment_rw (generation_start_segment (gen_to_reset));
31815 FIRE_EVENT(BGC2ndNonConEnd);
31817 loh_alloc_thread_count = 0;
31818 current_bgc_state = bgc_sweep_soh;
31819 verify_soh_segment_list();
31821 #ifdef FEATURE_BASICFREEZE
31822 if ((generation_start_segment (gen) != ephemeral_heap_segment) &&
31823 ro_segments_in_range)
31825 sweep_ro_segments (generation_start_segment (gen));
31827 #endif // FEATURE_BASICFREEZE
31829 //TODO BACKGROUND_GC: can we move this to where we switch to the LOH?
31830 if (current_c_gc_state != c_gc_state_planning)
31832 current_c_gc_state = c_gc_state_planning;
31835 concurrent_print_time_delta ("Swe");
31837 heap_segment* loh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation + 1)));
31838 PREFIX_ASSUME(loh_seg != NULL);
31841 loh_seg->flags &= ~heap_segment_flags_swept;
31842 heap_segment_background_allocated (loh_seg) = heap_segment_allocated (loh_seg);
31843 loh_seg = heap_segment_next_rw (loh_seg);
31846 #ifdef MULTIPLE_HEAPS
31847 bgc_t_join.join(this, gc_join_restart_ee);
31848 if (bgc_t_join.joined())
31849 #endif //MULTIPLE_HEAPS
31851 #ifdef MULTIPLE_HEAPS
31852 dprintf(2, ("Starting BGC threads for resuming EE"));
31853 bgc_t_join.restart();
31854 #endif //MULTIPLE_HEAPS
31857 if (heap_number == 0)
31862 FIRE_EVENT(BGC2ndConBegin);
31864 background_ephemeral_sweep();
31866 concurrent_print_time_delta ("Swe eph");
31868 #ifdef MULTIPLE_HEAPS
31869 bgc_t_join.join(this, gc_join_after_ephemeral_sweep);
31870 if (bgc_t_join.joined())
31871 #endif //MULTIPLE_HEAPS
31873 #ifdef FEATURE_EVENT_TRACE
31874 bgc_heap_walk_for_etw_p = GCEventStatus::IsEnabled(GCEventProvider_Default,
31875 GCEventKeyword_GCHeapSurvivalAndMovement,
31876 GCEventLevel_Information);
31877 #endif //FEATURE_EVENT_TRACE
31879 leave_spin_lock (&gc_lock);
31881 #ifdef MULTIPLE_HEAPS
31882 dprintf(2, ("Starting BGC threads for BGC sweeping"));
31883 bgc_t_join.restart();
31884 #endif //MULTIPLE_HEAPS
31887 disable_preemptive (true);
31889 dprintf (2, ("bgs: sweeping gen2 objects"));
31890 dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31891 (size_t)heap_segment_mem (seg),
31892 (size_t)heap_segment_allocated (seg),
31893 (size_t)heap_segment_background_allocated (seg)));
31895 int num_objs = 256;
31896 int current_num_objs = 0;
31897 heap_segment* next_seg = 0;
31903 if (gen == large_object_generation)
31905 next_seg = heap_segment_next (seg);
31909 next_seg = heap_segment_prev (fseg, seg);
31914 if (!heap_segment_read_only_p (seg))
31916 if (gen == large_object_generation)
31918 // we can treat all LOH segments as in the bgc domain
31919 // regardless of whether we saw them during bgc mark or not,
31920 // because we don't allow LOH allocations during bgc
31921 // sweep anyway - the LOH segments can't change.
31922 process_background_segment_end (seg, gen, plug_end,
31923 start_seg, &delete_p);
31927 assert (heap_segment_background_allocated (seg) != 0);
31928 process_background_segment_end (seg, gen, plug_end,
31929 start_seg, &delete_p);
31931 assert (next_seg || !delete_p);
31937 generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
31942 dprintf (2, ("seg %Ix has been swept", seg));
31943 seg->flags |= heap_segment_flags_swept;
31946 verify_soh_segment_list();
31950 dprintf (GTC_LOG, ("seg: %Ix, next_seg: %Ix, prev_seg: %Ix", seg, next_seg, prev_seg));
31954 generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
31956 PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
31958 if (gen != large_object_generation)
31960 dprintf (2, ("bgs: sweeping gen3 objects"));
31961 concurrent_print_time_delta ("Swe SOH");
31962 FIRE_EVENT(BGC1stSweepEnd, 0);
31964 enter_spin_lock (&more_space_lock_loh);
31965 add_saved_spinlock_info (true, me_acquire, mt_bgc_loh_sweep);
31967 concurrent_print_time_delta ("Swe LOH took msl");
31969 // We wait till all allocating threads are completely done.
31970 int spin_count = yp_spin_count_unit;
31971 while (loh_alloc_thread_count)
31973 spin_and_switch (spin_count, (loh_alloc_thread_count == 0));
31976 current_bgc_state = bgc_sweep_loh;
31977 gen = generation_of (max_generation+1);
31978 start_seg = heap_segment_rw (generation_start_segment (gen));
31980 PREFIX_ASSUME(start_seg != NULL);
31984 o = generation_allocation_start (gen);
31985 assert (method_table (o) == g_gc_pFreeObjectMethodTable);
31986 align_const = get_alignment_constant (FALSE);
31987 o = o + Align(size (o), align_const);
31989 end = heap_segment_allocated (seg);
31990 dprintf (2, ("sweeping gen3 objects"));
31991 generation_free_obj_space (gen) = 0;
31992 generation_allocator (gen)->clear();
31993 generation_free_list_space (gen) = 0;
31995 dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
31996 (size_t)heap_segment_mem (seg),
31997 (size_t)heap_segment_allocated (seg),
31998 (size_t)heap_segment_background_allocated (seg)));
32005 o = heap_segment_mem (seg);
32008 assert (gen != large_object_generation);
32009 assert (o == generation_allocation_start (generation_of (max_generation)));
32010 align_const = get_alignment_constant (TRUE);
32011 o = o + Align(size (o), align_const);
32015 current_sweep_pos = o;
32016 next_sweep_obj = o;
32019 end = background_next_end (seg, (gen == large_object_generation));
32020 dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
32021 (size_t)heap_segment_mem (seg),
32022 (size_t)heap_segment_allocated (seg),
32023 (size_t)heap_segment_background_allocated (seg)));
32027 if ((o < end) && background_object_marked (o, TRUE))
32030 if (gen == large_object_generation)
32032 dprintf (2, ("loh fr: [%Ix-%Ix[(%Id)", plug_end, plug_start, plug_start-plug_end));
32035 thread_gap (plug_end, plug_start-plug_end, gen);
32036 if (gen != large_object_generation)
32038 add_gen_free (max_generation, plug_start-plug_end);
32039 fix_brick_to_highest (plug_end, plug_start);
32040 // we need to fix the brick for the next plug here because an FGC can
32041 // happen, and it must not read a stale brick.
32042 fix_brick_to_highest (plug_start, plug_start);
32049 next_sweep_obj = o + Align(size (o), align_const);
32050 current_num_objs++;
32051 if (current_num_objs >= num_objs)
32053 current_sweep_pos = next_sweep_obj;
32056 current_num_objs = 0;
32059 o = next_sweep_obj;
32065 m = background_object_marked (o, TRUE);
32068 if (gen != large_object_generation)
32070 add_gen_plug (max_generation, plug_end-plug_start);
32071 dd_survived_size (dd) += (plug_end - plug_start);
32073 dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
32077 while ((o < end) && !background_object_marked (o, FALSE))
32079 next_sweep_obj = o + Align(size (o), align_const);
32080 current_num_objs++;
32081 if (current_num_objs >= num_objs)
32083 current_sweep_pos = plug_end;
32084 dprintf (1234, ("f: swept till %Ix", current_sweep_pos));
32086 current_num_objs = 0;
32089 o = next_sweep_obj;
32094 size_t total_loh_size = generation_size (max_generation + 1);
32095 size_t total_soh_size = generation_sizes (generation_of (max_generation));
32097 dprintf (GTC_LOG, ("h%d: S: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
32099 dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id",
32100 generation_free_list_space (generation_of (max_generation)),
32101 generation_free_obj_space (generation_of (max_generation))));
32102 dprintf (GTC_LOG, ("h%d: end of bgc sweep: gen3 FL: %Id, FO: %Id",
32104 generation_free_list_space (generation_of (max_generation + 1)),
32105 generation_free_obj_space (generation_of (max_generation + 1))));
32107 FIRE_EVENT(BGC2ndConEnd);
32108 concurrent_print_time_delta ("background sweep");
32110 heap_segment* reset_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
32111 PREFIX_ASSUME(reset_seg != NULL);
32115 heap_segment_saved_bg_allocated (reset_seg) = heap_segment_background_allocated (reset_seg);
32116 heap_segment_background_allocated (reset_seg) = 0;
32117 reset_seg = heap_segment_next_rw (reset_seg);
32120 generation* loh_gen = generation_of (max_generation + 1);
32121 generation_allocation_segment (loh_gen) = heap_segment_rw (generation_start_segment (loh_gen));
32123 // We calculate dynamic data here because if we wait till we signal the lh event,
32124 // the allocation thread can change the fragmentation and we may read an intermediate
32125 // value (which can be greater than the generation size). Plus by that time it won't be accurate.
32127 compute_new_dynamic_data (max_generation);
32129 enable_preemptive ();
32131 #ifdef MULTIPLE_HEAPS
32132 bgc_t_join.join(this, gc_join_set_state_free);
32133 if (bgc_t_join.joined())
32134 #endif //MULTIPLE_HEAPS
32136 // TODO: We are using this join just to set the state. Should
32137 // look into eliminating it - check to make sure things that use
32138 // this state can live with per heap state like should_check_bgc_mark.
32139 current_c_gc_state = c_gc_state_free;
32141 #ifdef MULTIPLE_HEAPS
32142 dprintf(2, ("Starting BGC threads after background sweep phase"));
32143 bgc_t_join.restart();
32144 #endif //MULTIPLE_HEAPS
32147 disable_preemptive (true);
32149 if (gc_lh_block_event.IsValid())
32151 gc_lh_block_event.Set();
32154 add_saved_spinlock_info (true, me_release, mt_bgc_loh_sweep);
32155 leave_spin_lock (&more_space_lock_loh);
32157 //dprintf (GTC_LOG, ("---- (GC%d)End Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
32158 dprintf (GTC_LOG, ("---- (GC%d)ESw ----", VolatileLoad(&settings.gc_index)));
32160 #endif //BACKGROUND_GC
32162 void gc_heap::sweep_large_objects ()
32164 //this min value is for the sake of the dynamic tuning.
32165 //so we know that we are not starving even if we have no survivors.
32167 generation* gen = large_object_generation;
32168 heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
32170 PREFIX_ASSUME(start_seg != NULL);
32172 heap_segment* seg = start_seg;
32173 heap_segment* prev_seg = 0;
32174 uint8_t* o = generation_allocation_start (gen);
32175 int align_const = get_alignment_constant (FALSE);
32177 //Skip the generation gap object
32178 o = o + Align(size (o), align_const);
32180 uint8_t* plug_end = o;
32181 uint8_t* plug_start = o;
32183 generation_allocator (gen)->clear();
32184 generation_free_list_space (gen) = 0;
32185 generation_free_obj_space (gen) = 0;
32188 dprintf (3, ("sweeping large objects"));
32189 dprintf (3, ("seg: %Ix, [%Ix, %Ix[, starting from %Ix",
32191 (size_t)heap_segment_mem (seg),
32192 (size_t)heap_segment_allocated (seg),
32197 if (o >= heap_segment_allocated (seg))
32199 heap_segment* next_seg = heap_segment_next (seg);
32200 //delete the empty segment if not the only one
32201 if ((plug_end == heap_segment_mem (seg)) &&
32202 (seg != start_seg) && !heap_segment_read_only_p (seg))
32204 //prepare for deletion
32205 dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
32207 heap_segment_next (prev_seg) = next_seg;
32208 heap_segment_next (seg) = freeable_large_heap_segment;
32209 freeable_large_heap_segment = seg;
32213 if (!heap_segment_read_only_p (seg))
32215 dprintf (3, ("Trimming seg to %Ix[", (size_t)plug_end));
32216 heap_segment_allocated (seg) = plug_end;
32217 decommit_heap_segment_pages (seg, 0);
32226 o = heap_segment_mem (seg);
32228 dprintf (3, ("seg: %Ix, [%Ix, %Ix[", (size_t)seg,
32229 (size_t)heap_segment_mem (seg),
32230 (size_t)heap_segment_allocated (seg)));
32233 if (large_object_marked(o, TRUE))
32236 //everything between plug_end and plug_start is free
32237 thread_gap (plug_end, plug_start-plug_end, gen);
32242 o = o + AlignQword (size (o));
32243 if (o >= heap_segment_allocated (seg))
32247 m = large_object_marked (o, TRUE);
32250 dprintf (3, ("plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
32254 while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
32256 o = o + AlignQword (size (o));
32261 generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));
32263 PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
32266 void gc_heap::relocate_in_large_objects ()
32268 relocate_args args;
32270 args.high = gc_high;
32271 args.last_plug = 0;
32273 generation* gen = large_object_generation;
32275 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
32277 PREFIX_ASSUME(seg != NULL);
32279 uint8_t* o = generation_allocation_start (gen);
32283 if (o >= heap_segment_allocated (seg))
32285 seg = heap_segment_next_rw (seg);
32290 o = heap_segment_mem (seg);
32293 while (o < heap_segment_allocated (seg))
32295 check_class_object_demotion (o);
32296 if (contain_pointers (o))
32298 dprintf(3, ("Relocating through large object %Ix", (size_t)o));
32299 go_through_object_nostart (method_table (o), o, size(o), pval,
32301 reloc_survivor_helper (pval);
32304 o = o + AlignQword (size (o));
32309 void gc_heap::mark_through_cards_for_large_objects (card_fn fn,
32312 uint8_t* low = gc_low;
32313 size_t end_card = 0;
32314 generation* oldest_gen = generation_of (max_generation+1);
32315 heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen));
32317 PREFIX_ASSUME(seg != NULL);
32319 uint8_t* beg = generation_allocation_start (oldest_gen);
32320 uint8_t* end = heap_segment_allocated (seg);
32322 size_t cg_pointers_found = 0;
32324 size_t card_word_end = (card_of (align_on_card_word (end)) /
32329 size_t n_card_set = 0;
32330 uint8_t* next_boundary = (relocating ?
32331 generation_plan_allocation_start (generation_of (max_generation -1)) :
32334 uint8_t* nhigh = (relocating ?
32335 heap_segment_plan_allocated (ephemeral_heap_segment) :
32338 BOOL foundp = FALSE;
32339 uint8_t* start_address = 0;
32340 uint8_t* limit = 0;
32341 size_t card = card_of (beg);
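// Editor's note on the card geometry used below (a sketch; card_size and
// card_word_width come from definitions elsewhere, and the concrete numbers
// are assumptions rather than values restated from this file):
//
//   card_of (addr)  ~ (size_t)addr / card_size     // one card per card_size bytes
//   a card word packs card_word_width (32) cards
//
// e.g. if card_size were 256, a 64KB stretch of heap would span 256 cards,
// i.e. 8 card words, and find_card below would scan those 8 words for set bits.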
32343 #ifdef BACKGROUND_GC
32344 BOOL consider_bgc_mark_p = FALSE;
32345 BOOL check_current_sweep_p = FALSE;
32346 BOOL check_saved_sweep_p = FALSE;
32347 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
32348 #endif //BACKGROUND_GC
32350 size_t total_cards_cleared = 0;
32352 //dprintf(3,( "scanning large objects from %Ix to %Ix", (size_t)beg, (size_t)end));
32353 dprintf(3, ("CMl: %Ix->%Ix", (size_t)beg, (size_t)end));
32356 if ((o < end) && (card_of(o) > card))
32358 dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
32359 if (cg_pointers_found == 0)
32361 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)o));
32362 clear_cards (card, card_of((uint8_t*)o));
32363 total_cards_cleared += (card_of((uint8_t*)o) - card);
32365 n_eph +=cg_pointers_found;
32366 cg_pointers_found = 0;
32367 card = card_of ((uint8_t*)o);
32369 if ((o < end) &&(card >= end_card))
32371 foundp = find_card (card_table, card, card_word_end, end_card);
32374 n_card_set+= end_card - card;
32375 start_address = max (beg, card_address (card));
32377 limit = min (end, card_address (end_card));
32379 if ((!foundp) || (o >= end) || (card_address (card) >= end))
32381 if ((foundp) && (cg_pointers_found == 0))
32383 dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
32384 (size_t)card_address(card+1)));
32385 clear_cards (card, card+1);
32386 total_cards_cleared += 1;
32388 n_eph +=cg_pointers_found;
32389 cg_pointers_found = 0;
32390 if ((seg = heap_segment_next_rw (seg)) != 0)
32392 #ifdef BACKGROUND_GC
32393 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
32394 #endif //BACKGROUND_GC
32395 beg = heap_segment_mem (seg);
32396 end = compute_next_end (seg, low);
32397 card_word_end = card_of (align_on_card_word (end)) / card_word_width;
32398 card = card_of (beg);
32409 assert (card_set_p (card));
32411 dprintf(3,("card %Ix: o: %Ix, l: %Ix[ ",
32412 card, (size_t)o, (size_t)limit));
32414 assert (Align (size (o)) >= Align (min_obj_size));
32415 size_t s = size (o);
32416 uint8_t* next_o = o + AlignQword (s);
32422 assert (Align (s) >= Align (min_obj_size));
32423 next_o = o + AlignQword (s);
32426 dprintf (4, ("|%Ix|", (size_t)o));
32427 if (next_o < start_address)
32432 #ifdef BACKGROUND_GC
32433 if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
32437 #endif //BACKGROUND_GC
32439 #ifdef COLLECTIBLE_CLASS
32440 if (is_collectible(o))
32442 BOOL passed_end_card_p = FALSE;
32444 if (card_of (o) > card)
32446 passed_end_card_p = card_transition (o, end, card_word_end,
32450 foundp, start_address,
32451 limit, total_cards_cleared);
32454 if ((!passed_end_card_p || foundp) && (card_of (o) == card))
32456 // card is valid and it covers the head of the object
32457 if (fn == &gc_heap::relocate_address)
32459 keep_card_live (o, n_gen, cg_pointers_found);
32463 uint8_t* class_obj = get_class_object (o);
32464 mark_through_cards_helper (&class_obj, n_gen,
32465 cg_pointers_found, fn,
32466 nhigh, next_boundary);
32470 if (passed_end_card_p)
32472 if (foundp && (card_address (card) < next_o))
32474 goto go_through_refs;
32484 #endif //COLLECTIBLE_CLASS
32486 if (contain_pointers (o))
32488 dprintf(3,("Going through %Ix", (size_t)o));
32490 go_through_object (method_table(o), o, s, poo,
32491 start_address, use_start, (o + s),
32493 if (card_of ((uint8_t*)poo) > card)
32495 BOOL passed_end_card_p = card_transition ((uint8_t*)poo, end,
32500 foundp, start_address,
32501 limit, total_cards_cleared);
32503 if (passed_end_card_p)
32505 if (foundp && (card_address (card) < next_o))
32509 if (ppstop <= (uint8_t**)start_address)
32511 else if (poo < (uint8_t**)start_address)
32512 {poo = (uint8_t**)start_address;}
32522 mark_through_cards_helper (poo, n_gen,
32523 cg_pointers_found, fn,
32524 nhigh, next_boundary);
32536 // compute the efficiency ratio of the card table
32539 generation_skip_ratio = min (((n_eph > 800) ?
32540 (int)(((float)n_gen / (float)n_eph) * 100) : 100),
32541 generation_skip_ratio);
32543 dprintf (3, ("Mloh: cross: %Id, useful: %Id, cards cleared: %Id, cards set: %Id, ratio: %d",
32544 n_eph, n_gen, total_cards_cleared, n_card_set, generation_skip_ratio));
32548 dprintf (3, ("R: Mloh: cross: %Id, useful: %Id, cards set: %Id, ratio: %d",
32549 n_eph, n_gen, n_card_set, generation_skip_ratio));
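// Editor's note: a worked example of the skip-ratio computation above. If the
// scan visited n_eph = 1000 cross-generation pointer slots but only n_gen = 150
// of them actually pointed into the ephemeral range, the ratio is
// (150/1000)*100 = 15; since n_eph > 800, generation_skip_ratio drops to
// min (15, its previous value), giving later GCs a measure of how little the
// card scan over this region paid off.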
32553 void gc_heap::descr_segment (heap_segment* seg )
32556 uint8_t* x = heap_segment_mem (seg);
32557 while (x < heap_segment_allocated (seg))
32559 dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
32560 x = x + Align(size (x));
32563 UNREFERENCED_PARAMETER(seg);
32567 void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
32569 #ifdef MULTIPLE_HEAPS
32570 int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
32571 for (int i = 0; i < n_heaps; i++)
32573 gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
32574 #else //MULTIPLE_HEAPS
32576 gc_heap* hp = NULL;
32578 // prefix complains about us dereferencing hp in wks build even though we only access static members
32579 // this way. not sure how to shut it up except for this ugly workaround:
32580 PREFIX_ASSUME(hp != NULL);
32581 #endif // _PREFAST_
32582 #endif //MULTIPLE_HEAPS
32584 int curr_gen_number0 = max_generation+1;
32585 while (curr_gen_number0 >= 0)
32587 generation* gen = hp->generation_of (curr_gen_number0);
32588 heap_segment* seg = generation_start_segment (gen);
32589 while (seg && (seg != hp->ephemeral_heap_segment))
32591 assert (curr_gen_number0 > 0);
32593 // report bounds from heap_segment_mem (seg) to
32594 // heap_segment_allocated (seg);
32595 // for generation # curr_gen_number0
32596 // for heap # heap_no
32598 fn(context, curr_gen_number0, heap_segment_mem (seg),
32599 heap_segment_allocated (seg),
32600 curr_gen_number0 == max_generation+1 ? heap_segment_reserved (seg) : heap_segment_allocated (seg));
32602 seg = heap_segment_next (seg);
32606 assert (seg == hp->ephemeral_heap_segment);
32607 assert (curr_gen_number0 <= max_generation);
32609 if (curr_gen_number0 == max_generation)
32611 if (heap_segment_mem (seg) < generation_allocation_start (hp->generation_of (max_generation-1)))
32613 // report bounds from heap_segment_mem (seg) to
32614 // generation_allocation_start (generation_of (max_generation-1))
32615 // for heap # heap_number
32617 fn(context, curr_gen_number0, heap_segment_mem (seg),
32618 generation_allocation_start (hp->generation_of (max_generation-1)),
32619 generation_allocation_start (hp->generation_of (max_generation-1)) );
32622 else if (curr_gen_number0 != 0)
32624 //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
32625 // to generation_allocation_start (generation_of (curr_gen_number0-1))
32626 // for heap # heap_number
32628 fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
32629 generation_allocation_start (hp->generation_of (curr_gen_number0-1)),
32630 generation_allocation_start (hp->generation_of (curr_gen_number0-1)));
32634 //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
32635 // to heap_segment_allocated (ephemeral_heap_segment);
32636 // for heap # heap_number
32638 fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
32639 heap_segment_allocated (hp->ephemeral_heap_segment),
32640 heap_segment_reserved (hp->ephemeral_heap_segment) );
32643 curr_gen_number0--;
32649 // Note that when logging is on it can take a long time to go through the free items.
32650 void gc_heap::print_free_list (int gen, heap_segment* seg)
32652 UNREFERENCED_PARAMETER(gen);
32653 UNREFERENCED_PARAMETER(seg);
32655 if (settings.concurrent == FALSE)
32657 uint8_t* seg_start = heap_segment_mem (seg);
32658 uint8_t* seg_end = heap_segment_allocated (seg);
32660 dprintf (3, ("Free list in seg %Ix:", seg_start));
32662 size_t total_free_item = 0;
32664 allocator* gen_allocator = generation_allocator (generation_of (gen));
32665 for (unsigned int b = 0; b < gen_allocator->number_of_buckets(); b++)
32667 uint8_t* fo = gen_allocator->alloc_list_head_of (b);
32670 if (fo >= seg_start && fo < seg_end)
32674 size_t free_item_len = size(fo);
32676 dprintf (3, ("[%Ix, %Ix[:%Id",
32678 (size_t)(fo + free_item_len),
32682 fo = free_list_slot (fo);
32686 dprintf (3, ("total %Id free items", total_free_item));
32692 void gc_heap::descr_generations (BOOL begin_gc_p)
32694 UNREFERENCED_PARAMETER(begin_gc_p);
32696 if (StressLog::StressLogOn(LF_GC, LL_INFO10))
32699 #ifdef MULTIPLE_HEAPS
32701 #endif //MULTIPLE_HEAPS
32703 STRESS_LOG1(LF_GC, LL_INFO10, "GC Heap %p\n", hp);
32704 for (int n = max_generation; n >= 0; --n)
32706 STRESS_LOG4(LF_GC, LL_INFO10, " Generation %d [%p, %p] cur = %p\n",
32708 generation_allocation_start(generation_of(n)),
32709 generation_allocation_limit(generation_of(n)),
32710 generation_allocation_pointer(generation_of(n)));
32712 heap_segment* seg = generation_start_segment(generation_of(n));
32715 STRESS_LOG4(LF_GC, LL_INFO10, " Segment mem %p alloc = %p used %p committed %p\n",
32716 heap_segment_mem(seg),
32717 heap_segment_allocated(seg),
32718 heap_segment_used(seg),
32719 heap_segment_committed(seg));
32720 seg = heap_segment_next(seg);
32724 #endif // STRESS_LOG
32727 dprintf (2, ("lowest_address: %Ix highest_address: %Ix",
32728 (size_t) lowest_address, (size_t) highest_address));
32729 #ifdef BACKGROUND_GC
32730 dprintf (2, ("bgc lowest_address: %Ix bgc highest_address: %Ix",
32731 (size_t) background_saved_lowest_address, (size_t) background_saved_highest_address));
32732 #endif //BACKGROUND_GC
32734 if (heap_number == 0)
32736 dprintf (1, ("total heap size: %Id, commit size: %Id", get_total_heap_size(), get_total_committed_size()));
32739 int curr_gen_number = max_generation+1;
32740 while (curr_gen_number >= 0)
32742 size_t total_gen_size = generation_size (curr_gen_number);
32743 #ifdef SIMPLE_DPRINTF
32744 dprintf (GTC_LOG, ("[%s][g%d]gen %d:, size: %Id, frag: %Id(L: %Id, O: %Id), f: %d%% %s %s %s",
32745 (begin_gc_p ? "BEG" : "END"),
32746 settings.condemned_generation,
32749 dd_fragmentation (dynamic_data_of (curr_gen_number)),
32750 generation_free_list_space (generation_of (curr_gen_number)),
32751 generation_free_obj_space (generation_of (curr_gen_number)),
32753 (int)(((double)dd_fragmentation (dynamic_data_of (curr_gen_number)) / (double)total_gen_size) * 100) :
32755 (begin_gc_p ? ("") : (settings.compaction ? "(compact)" : "(sweep)")),
32756 (settings.heap_expansion ? "(EX)" : " "),
32757 (settings.promotion ? "Promotion" : "NoPromotion")));
32759 dprintf (2, ( "Generation %d: gap size: %d, generation size: %Id, fragmentation: %Id",
32761 size (generation_allocation_start (generation_of (curr_gen_number))),
32763 dd_fragmentation (dynamic_data_of (curr_gen_number))));
32764 #endif //SIMPLE_DPRINTF
32766 generation* gen = generation_of (curr_gen_number);
32767 heap_segment* seg = generation_start_segment (gen);
32768 while (seg && (seg != ephemeral_heap_segment))
32770 dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)",
32772 (size_t)heap_segment_mem (seg),
32773 (size_t)heap_segment_allocated (seg),
32774 (size_t)heap_segment_committed (seg),
32775 (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)),
32776 (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg))));
32777 print_free_list (curr_gen_number, seg);
32778 seg = heap_segment_next (seg);
32780 if (seg && (seg != generation_start_segment (gen)))
32782 dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
32784 (size_t)heap_segment_mem (seg),
32785 (size_t)generation_allocation_start (generation_of (curr_gen_number-1))));
32786 print_free_list (curr_gen_number, seg);
32791 dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
32793 (size_t)generation_allocation_start (generation_of (curr_gen_number)),
32794 (size_t)(((curr_gen_number == 0)) ?
32795 (heap_segment_allocated
32796 (generation_start_segment
32797 (generation_of (curr_gen_number)))) :
32798 (generation_allocation_start
32799 (generation_of (curr_gen_number - 1))))
32801 print_free_list (curr_gen_number, seg);
32813 //-----------------------------------------------------------------------------
32815 // VM Specific support
32817 //-----------------------------------------------------------------------------
32822 unsigned int PromotedObjectCount = 0;
32823 unsigned int CreatedObjectCount = 0;
32824 unsigned int AllocDuration = 0;
32825 unsigned int AllocCount = 0;
32826 unsigned int AllocBigCount = 0;
32827 unsigned int AllocSmallCount = 0;
32828 unsigned int AllocStart = 0;
32831 //Static member variables.
32832 VOLATILE(BOOL) GCHeap::GcInProgress = FALSE;
32834 //CMCSafeLock* GCHeap::fGcLock;
32835 GCEvent *GCHeap::WaitForGCEvent = NULL;
32838 unsigned int GCHeap::GcDuration;
32840 unsigned GCHeap::GcCondemnedGeneration = 0;
32841 size_t GCHeap::totalSurvivedSize = 0;
32842 #ifdef FEATURE_PREMORTEM_FINALIZATION
32843 CFinalize* GCHeap::m_Finalize = 0;
32844 BOOL GCHeap::GcCollectClasses = FALSE;
32845 VOLATILE(int32_t) GCHeap::m_GCFLock = 0;
32847 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
32849 #ifdef BACKGROUND_GC
32850 int GCHeap::gc_stress_fgcs_in_bgc = 0;
32851 #endif // BACKGROUND_GC
32852 #ifndef MULTIPLE_HEAPS
32853 OBJECTHANDLE GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
32854 int GCHeap::m_CurStressObj = 0;
32855 #endif // !MULTIPLE_HEAPS
32856 #endif // STRESS_HEAP
32857 #endif // FEATURE_REDHAWK
32859 #endif //FEATURE_PREMORTEM_FINALIZATION
32861 class NoGCRegionLockHolder
32864 NoGCRegionLockHolder()
32866 enter_spin_lock_noinstru(&g_no_gc_lock);
32869 ~NoGCRegionLockHolder()
32871 leave_spin_lock_noinstru(&g_no_gc_lock);
32875 // An explanation of locking for finalization:
32877 // Multiple threads allocate objects. During the allocation, they are serialized by
32878 // the AllocLock above. But they release that lock before they register the object
32879 // for finalization. That's because there is much contention for the alloc lock, but
32880 // finalization is presumed to be a rare case.
32882 // So registering an object for finalization must be protected by the FinalizeLock.
32884 // There is another logical queue that involves finalization. When objects registered
32885 // for finalization become unreachable, they are moved from the "registered" queue to
32886 // the "unreachable" queue. Note that this only happens inside a GC, so no other
32887 // threads can be manipulating either queue at that time. Once the GC is over and
32888 // threads are resumed, the Finalizer thread will dequeue objects from the "unreachable"
32889 // queue and call their finalizers. This dequeue operation is also protected with
32890 // the finalize lock.
32892 // At first, this seems unnecessary. Only one thread is ever enqueuing or dequeuing
32893 // on the unreachable queue (either the GC thread during a GC or the finalizer thread
32894 // when a GC is not in progress). The reason we share a lock with threads enqueuing
32895 // on the "registered" queue is that the "registered" and "unreachable" queues are
32898 // They are actually two regions of a longer list, which can only grow at one end.
32899 // So to enqueue an object to the "registered" list, you actually rotate an unreachable
32900 // object at the boundary between the logical queues, out to the other end of the
32901 // unreachable queue -- where all growing takes place. Then you move the boundary
32902 // pointer so that the gap we created at the boundary is now on the "registered"
32903 // side rather than the "unreachable" side. Now the object can be placed into the
32904 // "registered" side at that point. This is much more efficient than doing moves
32905 // of arbitrarily long regions, but it causes the two queues to require a shared lock.
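// An illustrative sketch of the rotation described above (editor's addition;
// the array layout is hypothetical). Suppose the underlying list holds
//
//      [ R R R R | U1 U2 U3 ]      R = registered, U = unreachable,
//                ^ boundary          growth only happens at the right end
//
// To register a new object X:
//   1. copy the U at the boundary out to the right end:  [ R R R R | U1 U2 U3 U1 ]
//   2. move the boundary past the vacated slot:          [ R R R R _ | U2 U3 U1 ]
//   3. store X into the vacated slot:                    [ R R R R X | U2 U3 U1 ]
//
// Exactly one element moves no matter how long either region is, which is why
// sharing one FinalizeLock across both logical queues stays cheap.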
32907 // Notice that Enter/LeaveFinalizeLock is not a GC-aware spin lock. Instead, it relies
32908 // on the fact that the lock will only be taken for a brief period and that it will
32909 // never provoke or allow a GC while the lock is held. This is critical. If the
32910 // FinalizeLock used enter_spin_lock (and thus sometimes enters preemptive mode to
32911 // allow a GC), then the Alloc client would have to GC protect a finalizable object
32912 // to protect against that eventuality. That is too slow!
32916 BOOL IsValidObject99(uint8_t *pObject)
32919 if (!((CObjectHeader*)pObject)->IsFree())
32920 ((CObjectHeader *) pObject)->Validate();
32921 #endif //VERIFY_HEAP
32925 #ifdef BACKGROUND_GC
32926 BOOL gc_heap::bgc_mark_array_range (heap_segment* seg,
32928 uint8_t** range_beg,
32929 uint8_t** range_end)
32931 uint8_t* seg_start = heap_segment_mem (seg);
32932 uint8_t* seg_end = (whole_seg_p ? heap_segment_reserved (seg) : align_on_mark_word (heap_segment_allocated (seg)));
32934 if ((seg_start < background_saved_highest_address) &&
32935 (seg_end > background_saved_lowest_address))
32937 *range_beg = max (seg_start, background_saved_lowest_address);
32938 *range_end = min (seg_end, background_saved_highest_address);
32947 void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
32949 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32950 if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
32952 uint8_t* range_beg = 0;
32953 uint8_t* range_end = 0;
32955 if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end))
32957 size_t markw = mark_word_of (range_beg);
32958 size_t markw_end = mark_word_of (range_end);
32959 while (markw < markw_end)
32961 if (mark_array [markw])
32963 dprintf (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
32964 markw, mark_array [markw], mark_word_address (markw)));
32969 uint8_t* p = mark_word_address (markw_end);
32970 while (p < range_end)
32972 assert (!(mark_array_marked (p)));
32977 #endif //VERIFY_HEAP && MARK_ARRAY
32980 void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
32982 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
32983 size_t start_mark_bit = mark_bit_of (obj) + 1;
32984 size_t end_mark_bit = mark_bit_of (obj + s);
32985 unsigned int startbit = mark_bit_bit (start_mark_bit);
32986 unsigned int endbit = mark_bit_bit (end_mark_bit);
32987 size_t startwrd = mark_bit_word (start_mark_bit);
32988 size_t endwrd = mark_bit_word (end_mark_bit);
32989 unsigned int result = 0;
32991 unsigned int firstwrd = ~(lowbits (~0, startbit));
32992 unsigned int lastwrd = ~(highbits (~0, endbit));
32994 if (startwrd == endwrd)
32996 unsigned int wrd = firstwrd & lastwrd;
32997 result = mark_array[startwrd] & wrd;
33005 // verify the first mark word is cleared.
33008 result = mark_array[startwrd] & firstwrd;
33016 for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
33018 result = mark_array[wrdtmp];
33025 // verify the last mark word is cleared.
33028 result = mark_array[endwrd] & lastwrd;
33034 #endif //VERIFY_HEAP && MARK_ARRAY
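// Editor's note: a worked example of the mask construction above, assuming the
// usual lowbits/highbits macros (lowbits(w,n) keeps bits [0,n[ of w,
// highbits(w,n) keeps bits [n,32[):
//
//   startbit = 3  -> firstwrd = ~lowbits (~0, 3)   = 0xFFFFFFF8  // bits 3..31
//   endbit   = 20 -> lastwrd  = ~highbits (~0, 20) = 0x000FFFFF  // bits 0..19
//
// When startwrd == endwrd, (firstwrd & lastwrd) selects exactly the bits in
// [start_mark_bit, end_mark_bit[ inside that single mark word (start_mark_bit
// deliberately skips the object's own bit), and any nonzero result means a
// stray mark bit survived.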
33037 void gc_heap::clear_all_mark_array()
33040 //size_t num_dwords_written = 0;
33041 //size_t begin_time = GetHighPrecisionTimeStamp();
33043 generation* gen = generation_of (max_generation);
33044 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33050 if (gen != large_object_generation)
33052 gen = generation_of (max_generation+1);
33053 seg = heap_segment_rw (generation_start_segment (gen));
33061 uint8_t* range_beg = 0;
33062 uint8_t* range_end = 0;
33064 if (bgc_mark_array_range (seg, (seg == ephemeral_heap_segment), &range_beg, &range_end))
33066 size_t markw = mark_word_of (range_beg);
33067 size_t markw_end = mark_word_of (range_end);
33068 size_t size_total = (markw_end - markw) * sizeof (uint32_t);
33069 //num_dwords_written = markw_end - markw;
33071 size_t size_left = 0;
33073 assert (((size_t)&mark_array[markw] & (sizeof(PTR_PTR)-1)) == 0);
33075 if ((size_total & (sizeof(PTR_PTR) - 1)) != 0)
33077 size = (size_total & ~(sizeof(PTR_PTR) - 1));
33078 size_left = size_total - size;
33079 assert ((size_left & (sizeof (uint32_t) - 1)) == 0);
33086 memclr ((uint8_t*)&mark_array[markw], size);
33088 if (size_left != 0)
33090 uint32_t* markw_to_clear = &mark_array[markw + size / sizeof (uint32_t)];
33091 for (size_t i = 0; i < (size_left / sizeof (uint32_t)); i++)
33093 *markw_to_clear = 0;
33099 seg = heap_segment_next_rw (seg);
33102 //size_t end_time = GetHighPrecisionTimeStamp() - begin_time;
33104 //printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
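// Editor's note: a worked example of the pointer-sized/uint32_t split above
// (numbers are illustrative). On a 64-bit build, sizeof(PTR_PTR) == 8, so for
// size_total == 52 bytes of mark words:
//
//   size      = 52 & ~7 = 48   // bulk-cleared with one pointer-aligned memclr
//   size_left = 52 - 48 = 4    // one trailing uint32_t, cleared by the loop
//
// memclr takes the aligned bulk while the loop mops up the remainder without
// ever writing past markw_end.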
33106 #endif //MARK_ARRAY
33109 #endif //BACKGROUND_GC
33111 void gc_heap::verify_mark_array_cleared (heap_segment* seg)
33113 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
33114 assert (card_table == g_gc_card_table);
33115 size_t markw = mark_word_of (heap_segment_mem (seg));
33116 size_t markw_end = mark_word_of (heap_segment_reserved (seg));
33118 while (markw < markw_end)
33120 if (mark_array [markw])
33122 dprintf (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
33123 markw, mark_array [markw], mark_word_address (markw)));
33128 #endif //VERIFY_HEAP && MARK_ARRAY
33131 void gc_heap::verify_mark_array_cleared ()
33133 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
33134 if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
33136 generation* gen = generation_of (max_generation);
33137 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33143 if (gen != large_object_generation)
33145 gen = generation_of (max_generation+1);
33146 seg = heap_segment_rw (generation_start_segment (gen));
33154 bgc_verify_mark_array_cleared (seg);
33155 seg = heap_segment_next_rw (seg);
33158 #endif //VERIFY_HEAP && MARK_ARRAY
33161 void gc_heap::verify_seg_end_mark_array_cleared()
33163 #if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
33164 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
33166 generation* gen = generation_of (max_generation);
33167 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33173 if (gen != large_object_generation)
33175 gen = generation_of (max_generation+1);
33176 seg = heap_segment_rw (generation_start_segment (gen));
33184 // We already cleared all mark array bits for ephemeral generations
33185 // at the beginning of bgc sweep
33186 uint8_t* from = ((seg == ephemeral_heap_segment) ?
33187 generation_allocation_start (generation_of (max_generation - 1)) :
33188 heap_segment_allocated (seg));
33189 size_t markw = mark_word_of (align_on_mark_word (from));
33190 size_t markw_end = mark_word_of (heap_segment_reserved (seg));
33192 while (from < mark_word_address (markw))
33194 if (is_mark_bit_set (from))
33196 dprintf (3, ("mark bit for %Ix was not cleared", from));
33200 from += mark_bit_pitch;
33203 while (markw < markw_end)
33205 if (mark_array [markw])
33207 dprintf (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
33208 markw, mark_array [markw], mark_word_address (markw)));
33213 seg = heap_segment_next_rw (seg);
33216 #endif //VERIFY_HEAP && MARK_ARRAY
33219 // This function is called to make sure we don't mess up the segment list
33220 // in SOH. It's called by:
33221 // 1) begin and end of ephemeral GCs
33222 // 2) during bgc sweep when we switch segments.
33223 void gc_heap::verify_soh_segment_list()
33226 if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
33228 generation* gen = generation_of (max_generation);
33229 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33230 heap_segment* last_seg = 0;
33234 seg = heap_segment_next_rw (seg);
33236 if (last_seg != ephemeral_heap_segment)
33241 #endif //VERIFY_HEAP
33244 // This function can be called during any foreground GC or blocking GC. For background GCs,
33245 // it can be called at the end of the final marking, and at any point during background sweep.
33247 // NOTE - to be able to call this function during background sweep, we need to temporarily
33248 // NOT clear the mark array bits as we go.
33249 void gc_heap::verify_partial ()
33251 #ifdef BACKGROUND_GC
33252 //printf ("GC#%d: Verifying loh during sweep\n", settings.gc_index);
33253 //generation* gen = large_object_generation;
33254 generation* gen = generation_of (max_generation);
33255 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
33256 int align_const = get_alignment_constant (gen != large_object_generation);
33262 // Different ways to fail.
33263 BOOL mark_missed_p = FALSE;
33264 BOOL bad_ref_p = FALSE;
33265 BOOL free_ref_p = FALSE;
33271 if (gen != large_object_generation)
33274 gen = large_object_generation;
33275 align_const = get_alignment_constant (gen != large_object_generation);
33276 seg = heap_segment_rw (generation_start_segment (gen));
33285 o = heap_segment_mem (seg);
33286 end = heap_segment_allocated (seg);
33287 //printf ("validating [%Ix-[%Ix\n", o, end);
33292 BOOL marked_p = background_object_marked (o, FALSE);
33296 go_through_object_cl (method_table (o), o, s, oo,
33300 //dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o));
33301 MethodTable *pMT = method_table (*oo);
33303 if (pMT == g_gc_pFreeObjectMethodTable)
33309 if (!pMT->SanityCheck())
33312 dprintf (3, ("Bad member of %Ix %Ix",
33313 (size_t)oo, (size_t)*oo));
33317 if (current_bgc_state == bgc_final_marking)
33319 if (marked_p && !background_object_marked (*oo, FALSE))
33321 mark_missed_p = TRUE;
33330 o = o + Align(s, align_const);
33332 seg = heap_segment_next_rw (seg);
33335 //printf ("didn't find any large object large enough...\n");
33336 //printf ("finished verifying loh\n");
33337 #endif //BACKGROUND_GC
33343 gc_heap::verify_free_lists ()
33345 for (int gen_num = 0; gen_num <= max_generation+1; gen_num++)
33347 dprintf (3, ("Verifying free list for gen:%d", gen_num));
33348 allocator* gen_alloc = generation_allocator (generation_of (gen_num));
33349 size_t sz = gen_alloc->first_bucket_size();
33350 bool verify_undo_slot = (gen_num != 0) && (gen_num != max_generation+1) && !gen_alloc->discard_if_no_fit_p();
33352 for (unsigned int a_l_number = 0; a_l_number < gen_alloc->number_of_buckets(); a_l_number++)
33354 uint8_t* free_list = gen_alloc->alloc_list_head_of (a_l_number);
33358 if (!((CObjectHeader*)free_list)->IsFree())
33360 dprintf (3, ("Verifying Heap: curr free list item %Ix isn't a free object",
33361 (size_t)free_list));
33364 if (((a_l_number < (gen_alloc->number_of_buckets()-1))&& (unused_array_size (free_list) >= sz))
33365 || ((a_l_number != 0) && (unused_array_size (free_list) < sz/2)))
33367 dprintf (3, ("Verifying Heap: curr free list item %Ix isn't in the right bucket",
33368 (size_t)free_list));
33371 if (verify_undo_slot && (free_list_undo (free_list) != UNDO_EMPTY))
33373 dprintf (3, ("Verifying Heap: curr free list item %Ix has a non-empty undo slot",
33374 (size_t)free_list));
33377 if ((gen_num != max_generation+1)&&(object_gennum (free_list)!= gen_num))
33379 dprintf (3, ("Verifying Heap: curr free list item %Ix is in the wrong generation free list",
33380 (size_t)free_list));
33385 free_list = free_list_slot (free_list);
33387 //verify the sanity of the tail
33388 uint8_t* tail = gen_alloc->alloc_list_tail_of (a_l_number);
33389 if (!((tail == 0) || (tail == prev)))
33391 dprintf (3, ("Verifying Heap: tail of free list is not correct"));
33396 uint8_t* head = gen_alloc->alloc_list_head_of (a_l_number);
33397 if ((head != 0) && (free_list_slot (head) != 0))
33399 dprintf (3, ("Verifying Heap: tail of free list is not correct"));
33410 gc_heap::verify_heap (BOOL begin_gc_p)
33412 int heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel());
33413 size_t last_valid_brick = 0;
33414 BOOL bCurrentBrickInvalid = FALSE;
33415 BOOL large_brick_p = TRUE;
33416 size_t curr_brick = 0;
33417 size_t prev_brick = (size_t)-1;
33418 int curr_gen_num = max_generation+1;
33419 heap_segment* seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num ) ));
33421 PREFIX_ASSUME(seg != NULL);
33423 uint8_t* curr_object = heap_segment_mem (seg);
33424 uint8_t* prev_object = 0;
33425 uint8_t* begin_youngest = generation_allocation_start(generation_of(0));
33426 uint8_t* end_youngest = heap_segment_allocated (ephemeral_heap_segment);
33427 uint8_t* next_boundary = generation_allocation_start (generation_of (max_generation - 1));
33428 int align_const = get_alignment_constant (FALSE);
33429 size_t total_objects_verified = 0;
33430 size_t total_objects_verified_deep = 0;
33432 #ifdef BACKGROUND_GC
33433 BOOL consider_bgc_mark_p = FALSE;
33434 BOOL check_current_sweep_p = FALSE;
33435 BOOL check_saved_sweep_p = FALSE;
33436 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33437 #endif //BACKGROUND_GC
33439 #ifdef MULTIPLE_HEAPS
33440 t_join* current_join = &gc_t_join;
33441 #ifdef BACKGROUND_GC
33442 if (settings.concurrent && (bgc_thread_id.IsCurrentThread()))
33444 // We always call verify_heap on entry to a GC on the SVR GC threads.
33445 current_join = &bgc_t_join;
33447 #endif //BACKGROUND_GC
33448 #endif //MULTIPLE_HEAPS
33450 UNREFERENCED_PARAMETER(begin_gc_p);
33451 #ifdef BACKGROUND_GC
33452 dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin",
33453 (begin_gc_p ? "BEG" : "END"),
33454 VolatileLoad(&settings.gc_index),
33455 (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
33457 dprintf (2,("[%s]GC#%d: Verifying heap - begin",
33458 (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index)));
33459 #endif //BACKGROUND_GC
33461 #ifndef MULTIPLE_HEAPS
33462 if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
33463 (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
33467 #endif //MULTIPLE_HEAPS
33469 #ifdef BACKGROUND_GC
33470 //don't touch the memory because the program is allocating from it.
33471 if (!settings.concurrent)
33472 #endif //BACKGROUND_GC
33474 if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL))
33476 //uninit the unused portions of segments.
33477 generation* gen1 = large_object_generation;
33478 heap_segment* seg1 = heap_segment_rw (generation_start_segment (gen1));
33479 PREFIX_ASSUME(seg1 != NULL);
33485 uint8_t* clear_start = heap_segment_allocated (seg1) - plug_skew;
33486 if (heap_segment_used (seg1) > clear_start)
33488 dprintf (3, ("setting end of seg %Ix: [%Ix-[%Ix to 0xaa",
33489 heap_segment_mem (seg1),
33491 heap_segment_used (seg1)));
33492 memset (heap_segment_allocated (seg1) - plug_skew, 0xaa,
33493 (heap_segment_used (seg1) - clear_start));
33495 seg1 = heap_segment_next_rw (seg1);
33499 if (gen1 == large_object_generation)
33501 gen1 = generation_of (max_generation);
33502 seg1 = heap_segment_rw (generation_start_segment (gen1));
33503 PREFIX_ASSUME(seg1 != NULL);
33514 #ifdef MULTIPLE_HEAPS
33515 current_join->join(this, gc_join_verify_copy_table);
33516 if (current_join->joined())
33518 // in concurrent GC, a new segment could be allocated while the GC is working, so the card/brick table might not be updated at this point
33519 for (int i = 0; i < n_heaps; i++)
33521 //copy the card and brick tables
33522 if (g_gc_card_table != g_heaps[i]->card_table)
33524 g_heaps[i]->copy_brick_card_table();
33528 current_join->restart();
33531 if (g_gc_card_table != card_table)
33532 copy_brick_card_table();
33533 #endif //MULTIPLE_HEAPS
33535 //verify that the generation structures make sense
33537 generation* gen = generation_of (max_generation);
33539 assert (generation_allocation_start (gen) ==
33540 heap_segment_mem (heap_segment_rw (generation_start_segment (gen))));
33541 int gen_num = max_generation-1;
33542 generation* prev_gen = gen;
33543 while (gen_num >= 0)
33545 gen = generation_of (gen_num);
33546 assert (generation_allocation_segment (gen) == ephemeral_heap_segment);
33547 assert (generation_allocation_start (gen) >= heap_segment_mem (ephemeral_heap_segment));
33548 assert (generation_allocation_start (gen) < heap_segment_allocated (ephemeral_heap_segment));
33550 if (generation_start_segment (prev_gen ) ==
33551 generation_start_segment (gen))
33553 assert (generation_allocation_start (prev_gen) <
33554 generation_allocation_start (gen));
33563 // Handle segment transitions
33564 if (curr_object >= heap_segment_allocated (seg))
33566 if (curr_object > heap_segment_allocated(seg))
33568 dprintf (3, ("Verifying Heap: curr_object: %Ix > heap_segment_allocated (seg: %Ix)",
33569 (size_t)curr_object, (size_t)seg));
33572 seg = heap_segment_next_in_range (seg);
33575 #ifdef BACKGROUND_GC
33576 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33577 #endif //BACKGROUND_GC
33578 curr_object = heap_segment_mem(seg);
33584 if (curr_gen_num == (max_generation+1))
33587 seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num)));
33589 PREFIX_ASSUME(seg != NULL);
33591 #ifdef BACKGROUND_GC
33592 should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
33593 #endif //BACKGROUND_GC
33594 curr_object = heap_segment_mem (seg);
33596 large_brick_p = FALSE;
33597 align_const = get_alignment_constant (TRUE);
33600 break; // Done Verifying Heap -- no more segments
33604 // Are we at the end of the youngest_generation?
33605 if (seg == ephemeral_heap_segment)
33607 if (curr_object >= end_youngest)
33609 // prev_object length is too long if we hit this int3
33610 if (curr_object > end_youngest)
33612 dprintf (3, ("Verifying Heap: curr_object: %Ix > end_youngest: %Ix",
33613 (size_t)curr_object, (size_t)end_youngest));
33619 if ((curr_object >= next_boundary) && (curr_gen_num > 0))
33622 if (curr_gen_num > 0)
33624 next_boundary = generation_allocation_start (generation_of (curr_gen_num - 1));
33629 //if (is_mark_set (curr_object))
33631 // printf ("curr_object: %Ix is marked!",(size_t)curr_object);
33632 // FATAL_GC_ERROR();
33635 size_t s = size (curr_object);
33636 dprintf (3, ("o: %Ix, s: %d", (size_t)curr_object, s));
33639 dprintf (3, ("Verifying Heap: size of current object %Ix == 0", curr_object));
33643 // If the object is not in the youngest generation, then let's
33644 // verify that the brick table is correct.
33645 if (((seg != ephemeral_heap_segment) ||
33646 (brick_of(curr_object) < brick_of(begin_youngest))))
33648 curr_brick = brick_of(curr_object);
33650 // Brick Table Verification...
33652 // On brick transition
33653 // if brick is negative
33654 // verify that brick indirects to previous valid brick
33656 // set current brick invalid flag to be flipped if we
33657 // encounter an object at the correct place
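// Illustrative note (an added summary, not from the original source): a
// positive brick entry records the start of the last plug in that brick,
// biased by +1, so an entry of 0x21 for a brick at address B means the plug
// starts at B + 0x20 (see the curr_object check further down). Entries below
// zero are relative back-links: an entry of -2 means "the plug spanning this
// brick starts 2 bricks back", which is why the chain walk below computes
// i = i + brick_table[i] until it reaches a non-negative entry or the start
// of the segment.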
33659 if (curr_brick != prev_brick)
33661 // If the last brick we were examining had positive
33662 // entry but we never found the matching object, then
33663 // we have a problem
33664 // If prev_brick was the last one of the segment
33665 // it's ok for it to be invalid because it is never looked at
33666 if (bCurrentBrickInvalid &&
33667 (curr_brick != brick_of (heap_segment_mem (seg))) &&
33668 !heap_segment_read_only_p (seg))
33670 dprintf (3, ("curr brick %Ix invalid", curr_brick));
33676 //large objects verify the table only if they are within the range covered by the card/brick table
33678 if ((heap_segment_reserved (seg) <= highest_address) &&
33679 (heap_segment_mem (seg) >= lowest_address) &&
33680 brick_table [curr_brick] != 0)
33682 dprintf (3, ("curr_brick %Ix for large object %Ix not set to -32768",
33683 curr_brick, (size_t)curr_object));
33688 bCurrentBrickInvalid = FALSE;
33693 // If the current brick contains a negative value make sure
33694 // that the indirection terminates at the last valid brick
33695 if (brick_table [curr_brick] <= 0)
33697 if (brick_table [curr_brick] == 0)
33699 dprintf(3, ("curr_brick %Ix for object %Ix set to 0",
33700 curr_brick, (size_t)curr_object));
33703 ptrdiff_t i = curr_brick;
33704 while ((i >= ((ptrdiff_t) brick_of (heap_segment_mem (seg)))) &&
33705 (brick_table[i] < 0))
33707 i = i + brick_table[i];
33709 if (i < ((ptrdiff_t)(brick_of (heap_segment_mem (seg))) - 1))
33711 dprintf (3, ("ptrdiff i: %Ix < brick_of (heap_segment_mem (seg)):%Ix - 1. curr_brick: %Ix",
33712 i, brick_of (heap_segment_mem (seg)),
33716 // if (i != last_valid_brick)
33717 // FATAL_GC_ERROR();
33718 bCurrentBrickInvalid = FALSE;
33720 else if (!heap_segment_read_only_p (seg))
33722 bCurrentBrickInvalid = TRUE;
33727 if (bCurrentBrickInvalid)
33729 if (curr_object == (brick_address(curr_brick) + brick_table[curr_brick] - 1))
33731 bCurrentBrickInvalid = FALSE;
33732 last_valid_brick = curr_brick;
33737 if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable)
33739 #ifdef FEATURE_LOH_COMPACTION
33740 if ((curr_gen_num == (max_generation+1)) && (prev_object != 0))
33742 assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable);
33744 #endif //FEATURE_LOH_COMPACTION
33746 total_objects_verified++;
33748 BOOL can_verify_deep = TRUE;
33749 #ifdef BACKGROUND_GC
33750 can_verify_deep = fgc_should_consider_object (curr_object, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p);
33751 #endif //BACKGROUND_GC
33753 BOOL deep_verify_obj = can_verify_deep;
33754 if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
33755 deep_verify_obj = FALSE;
33757 ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);
33759 if (can_verify_deep)
33761 if (curr_gen_num > 0)
33763 BOOL need_card_p = FALSE;
33764 if (contain_pointers_or_collectible (curr_object))
33766 dprintf (4, ("curr_object: %Ix", (size_t)curr_object));
33767 size_t crd = card_of (curr_object);
33768 BOOL found_card_p = card_set_p (crd);
33770 #ifdef COLLECTIBLE_CLASS
33771 if (is_collectible(curr_object))
33773 uint8_t* class_obj = get_class_object (curr_object);
33774 if ((class_obj < ephemeral_high) && (class_obj >= next_boundary))
33778 dprintf (3, ("Card not set, curr_object = [%Ix:%Ix pointing to class object %Ix",
33779 card_of (curr_object), (size_t)curr_object, class_obj));
33785 #endif //COLLECTIBLE_CLASS
33787 if (contain_pointers(curr_object))
33789 go_through_object_nostart
33790 (method_table(curr_object), curr_object, s, oo,
33792 if ((crd != card_of ((uint8_t*)oo)) && !found_card_p)
33794 crd = card_of ((uint8_t*)oo);
33795 found_card_p = card_set_p (crd);
33796 need_card_p = FALSE;
33798 if ((*oo < ephemeral_high) && (*oo >= next_boundary))
33800 need_card_p = TRUE;
33803 if (need_card_p && !found_card_p)
33806 dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
33807 card_of (curr_object), (size_t)curr_object,
33808 card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
33814 if (need_card_p && !found_card_p)
33816 dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
33817 card_of (curr_object), (size_t)curr_object,
33818 card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
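// Illustrative summary (an added note, not from the original source):
// need_card_p is set when this gen1+ object holds a pointer into the
// ephemeral range [next_boundary, ephemeral_high). Such a cross-generational
// reference must have a card set covering its location; a clear card here
// means an ephemeral GC would never scan this object and would miss the
// reference, which usually indicates a missed write barrier.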
33823 total_objects_verified_deep++;
33827 prev_object = curr_object;
33828 prev_brick = curr_brick;
33829 curr_object = curr_object + Align(s, align_const);
33830 if (curr_object < prev_object)
33832 dprintf (3, ("overflow because of a bad object size: %Ix size %Ix", prev_object, s));
33837 #ifdef BACKGROUND_GC
33838 dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id",
33839 (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p () ? "FGC" : "NGC")),
33840 (begin_gc_p ? "BEG" : "END"),
33841 ((current_c_gc_state == c_gc_state_planning) ? "in plan" : "not in plan"),
33842 total_objects_verified, total_objects_verified_deep));
33843 if (current_c_gc_state != c_gc_state_planning)
33845 assert (total_objects_verified == total_objects_verified_deep);
33847 #endif //BACKGROUND_GC
33849 verify_free_lists();
33851 #ifdef FEATURE_PREMORTEM_FINALIZATION
33852 finalize_queue->CheckFinalizerObjects();
33853 #endif // FEATURE_PREMORTEM_FINALIZATION
33856 // to be consistent with handle table APIs, pass a ScanContext*
33857 // to provide the heap number. The SC isn't complete though, so
33858 // limit its scope to handle table verification.
33860 sc.thread_number = heap_number;
33861 GCScan::VerifyHandleTable(max_generation, max_generation, &sc);
33864 #ifdef MULTIPLE_HEAPS
33865 current_join->join(this, gc_join_verify_objects_done);
33866 if (current_join->joined())
33867 #endif //MULTIPLE_HEAPS
33869 GCToEEInterface::VerifySyncTableEntry();
33870 #ifdef MULTIPLE_HEAPS
33871 current_join->restart();
33872 #endif //MULTIPLE_HEAPS
33875 #ifdef BACKGROUND_GC
33876 if (!settings.concurrent)
33878 if (current_c_gc_state == c_gc_state_planning)
33880 // temporarily commenting this out 'cause an FGC
33881 // could be triggered before we sweep ephemeral.
33882 //verify_seg_end_mark_array_cleared();
33886 if (settings.concurrent)
33888 verify_mark_array_cleared();
33890 dprintf (2,("GC#%d(%s): Verifying heap - end",
33891 VolatileLoad(&settings.gc_index),
33892 (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
33894 dprintf (2,("GC#%d: Verifying heap - end", VolatileLoad(&settings.gc_index)));
33895 #endif //BACKGROUND_GC
33898 #endif //VERIFY_HEAP
33901 void GCHeap::ValidateObjectMember (Object* obj)
33904 size_t s = size (obj);
33905 uint8_t* o = (uint8_t*)obj;
33907 go_through_object_cl (method_table (obj), o, s, oo,
33909 uint8_t* child_o = *oo;
33912 dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o));
33913 MethodTable *pMT = method_table (child_o);
33915 if (!pMT->SanityCheck()) {
33916 dprintf (3, ("Bad member of %Ix %Ix",
33917 (size_t)oo, (size_t)child_o));
33922 #endif // VERIFY_HEAP
33925 void DestructObject (CObjectHeader* hdr)
33927 UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
33928 hdr->~CObjectHeader();
33931 HRESULT GCHeap::Shutdown ()
33935 GCScan::GcRuntimeStructuresValid (FALSE);
33937 // Cannot assert this, since we use SuspendEE as the mechanism to quiesce all
33938 // threads except the one performing the shutdown.
33939 // ASSERT( !GcInProgress );
33941 // Guard against any more GC occurring and against any threads blocking
33942 // for GC to complete when the GC heap is gone. This fixes a race condition
33943 // where a thread in GC is destroyed as part of process destruction and
33944 // the remaining threads block waiting for GC to complete.
33947 //EnterAllocLock();
33949 //EnterFinalizeLock();
33952 // during shutdown a lot of threads are suspended
33953 // on this event; we don't want to wake them up just yet
33954 //CloseHandle (WaitForGCEvent);
33956 //find out if the global card table hasn't been used yet
33957 uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
33958 if (card_table_refcount (ct) == 0)
33960 destroy_card_table (ct);
33961 g_gc_card_table = nullptr;
33963 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
33964 g_gc_card_bundle_table = nullptr;
33966 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
33967 SoftwareWriteWatch::StaticClose();
33968 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
33971 //destroy all segments on the standby list
33972 while(gc_heap::segment_standby_list != 0)
33974 heap_segment* next_seg = heap_segment_next (gc_heap::segment_standby_list);
33975 #ifdef MULTIPLE_HEAPS
33976 (gc_heap::g_heaps[0])->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
33977 #else //MULTIPLE_HEAPS
33978 pGenGCHeap->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
33979 #endif //MULTIPLE_HEAPS
33980 gc_heap::segment_standby_list = next_seg;
33984 #ifdef MULTIPLE_HEAPS
33986 for (int i = 0; i < gc_heap::n_heaps; i ++)
33988 delete gc_heap::g_heaps[i]->vm_heap;
33989 //destroy pure GC stuff
33990 gc_heap::destroy_gc_heap (gc_heap::g_heaps[i]);
33993 gc_heap::destroy_gc_heap (pGenGCHeap);
33995 #endif //MULTIPLE_HEAPS
33996 gc_heap::shutdown_gc();
34001 // Wait until a garbage collection is complete
34002 // returns NOERROR if wait was OK, other error code if failure.
34003 // WARNING: This will not undo the must complete state. If you are
34004 // in a must complete when you call this, you'd better know what you're doing.
34007 #ifdef FEATURE_PREMORTEM_FINALIZATION
34009 HRESULT AllocateCFinalize(CFinalize **pCFinalize)
34011 *pCFinalize = new (nothrow) CFinalize();
34012 if (*pCFinalize == NULL || !(*pCFinalize)->Initialize())
34013 return E_OUTOFMEMORY;
34017 #endif // FEATURE_PREMORTEM_FINALIZATION
34019 // init the instance heap
34020 HRESULT GCHeap::Init(size_t hn)
34022 HRESULT hres = S_OK;
34024 #ifdef MULTIPLE_HEAPS
34025 if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
34026 hres = E_OUTOFMEMORY;
34028 UNREFERENCED_PARAMETER(hn);
34029 if (!gc_heap::make_gc_heap())
34030 hres = E_OUTOFMEMORY;
34031 #endif //MULTIPLE_HEAPS
34037 //System wide initialization
34038 HRESULT GCHeap::Initialize()
34042 g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable();
34043 g_num_processors = GCToOSInterface::GetTotalProcessorCount();
34044 assert(g_num_processors != 0);
34046 //Initialize the static members.
34049 CreatedObjectCount = 0;
34052 bool is_restricted;
34053 gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&is_restricted);
34056 gc_heap::heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit();
34058 if (!(gc_heap::heap_hard_limit))
34060 uint32_t percent_of_mem = (uint32_t)GCConfig::GetGCHeapHardLimitPercent();
34061 if ((percent_of_mem > 0) && (percent_of_mem < 100))
34063 gc_heap::heap_hard_limit = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100);
34067 // If the hard limit is specified, the user is saying even if the process is already
34068 // running in a container, use this limit for the GC heap.
34069 if (!(gc_heap::heap_hard_limit))
34073 uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100;
34074 //printf ("returned physical mem %I64d, setting it to max (75%%: %I64d, 20mb)\n",
34075 // gc_heap::total_physical_mem, physical_mem_for_gc);
34076 gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc);
34080 //printf ("heap_hard_limit is %Id, total physical mem: %Id, %s restricted\n",
34081 // gc_heap::heap_hard_limit, gc_heap::total_physical_mem, (is_restricted ? "is" : "is not"));
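// Worked example (illustrative): in a container restricted to 1 GB with no
// hard limit configured, heap_hard_limit = max(20 MB, 75% of 1 GB) = 768 MB.
// The 20 MB floor only matters for extremely small containers (below ~27 MB,
// where 75% would dip under 20 MB).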
34085 uint32_t nhp_from_config = 0;
34087 #ifdef MULTIPLE_HEAPS
34088 AffinitySet config_affinity_set;
34089 if (!ParseGCHeapAffinitizeRanges(&config_affinity_set))
34091 return CLR_E_GC_BAD_AFFINITY_CONFIG_FORMAT;
34094 uintptr_t config_affinity_mask = static_cast<uintptr_t>(GCConfig::GetGCHeapAffinitizeMask());
34095 const AffinitySet* process_affinity_set = GCToOSInterface::SetGCThreadsAffinitySet(config_affinity_mask, &config_affinity_set);
34097 if (process_affinity_set->IsEmpty())
34099 return CLR_E_GC_BAD_AFFINITY_CONFIG;
34102 nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
34104 uint32_t nhp_from_process = GCToOSInterface::GetCurrentProcessCpuCount();
34106 if (nhp_from_config)
34108 // Even when the user specifies a heap count, it should not be more
34109 // than the number of procs this process can use.
34110 nhp_from_config = min (nhp_from_config, nhp_from_process);
34113 nhp = ((nhp_from_config == 0) ? nhp_from_process : nhp_from_config);
34115 nhp = min (nhp, MAX_SUPPORTED_CPUS);
34116 #ifndef FEATURE_REDHAWK
34117 gc_heap::gc_thread_no_affinitize_p = (gc_heap::heap_hard_limit ? false : (GCConfig::GetNoAffinitize() != 0));
34119 if (gc_heap::heap_hard_limit)
34121 gc_heap::gc_thread_no_affinitize_p = ((config_affinity_set.Count() == 0) && (config_affinity_mask == 0));
34124 if (!(gc_heap::gc_thread_no_affinitize_p))
34126 uint32_t num_affinitized_processors = (uint32_t)process_affinity_set->Count();
34128 if (num_affinitized_processors != 0)
34130 nhp = min(nhp, num_affinitized_processors);
34133 // Limit the GC heaps to the number of processors available in the system.
34134 nhp = min (nhp, GCToOSInterface::GetTotalProcessorCount());
34135 #endif // FEATURE_PAL
34137 #endif //!FEATURE_REDHAWK
34138 #endif //MULTIPLE_HEAPS
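// Worked example (illustrative): on a 16-proc machine with GCHeapCount=24 and
// GCHeapAffinitizeMask selecting 4 processors, nhp_from_config is clamped to
// min(24, 16) = 16, and since the affinity set is non-empty the heap count is
// further reduced to min(16, 4) = 4 (assuming the process itself can see all
// 16 processors, so nhp_from_process = 16).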
34140 size_t seg_size = 0;
34141 size_t large_seg_size = 0;
34143 if (gc_heap::heap_hard_limit)
34145 seg_size = gc_heap::get_segment_size_hard_limit (&nhp, (nhp_from_config == 0));
34146 gc_heap::soh_segment_size = seg_size;
34147 large_seg_size = seg_size * 2;
34151 seg_size = get_valid_segment_size();
34152 gc_heap::soh_segment_size = seg_size;
34153 large_seg_size = get_valid_segment_size (TRUE);
34156 dprintf (1, ("%d heaps, soh seg size: %Id mb, loh: %Id mb\n",
34158 (seg_size / (size_t)1024 / 1024),
34159 (large_seg_size / 1024 / 1024)));
34161 gc_heap::min_loh_segment_size = large_seg_size;
34162 gc_heap::min_segment_size = min (seg_size, large_seg_size);
34163 #ifdef SEG_MAPPING_TABLE
34164 gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size);
34165 #endif //SEG_MAPPING_TABLE
34167 #ifdef MULTIPLE_HEAPS
34168 gc_heap::n_heaps = nhp;
34169 hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
34171 hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
34172 #endif //MULTIPLE_HEAPS
34177 gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
34178 #ifndef MULTIPLE_HEAPS
34179 gc_heap::mem_one_percent /= g_num_processors;
34180 #endif //!MULTIPLE_HEAPS
34182 uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent();
34183 if (highmem_th_from_config)
34185 gc_heap::high_memory_load_th = min (99, highmem_th_from_config);
34186 gc_heap::v_high_memory_load_th = min (99, (highmem_th_from_config + 7));
34190 // We should only use this if we are in the "many process" mode which really is only applicable
34191 // to very powerful machines - before that's implemented, temporarily I am only enabling this for 80GB+ memory.
34192 // For now I am using an estimate to calculate these numbers but this should really be obtained
34193 // programmatically going forward.
34194 // I am assuming 47 processes using WKS GC and 3 using SVR GC.
34195 // I am assuming 3 in part due to the "very high memory load" is 97%.
34196 int available_mem_th = 10;
34197 if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
34199 int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(GCToOSInterface::GetTotalProcessorCount()));
34200 available_mem_th = min (available_mem_th, adjusted_available_mem_th);
34203 gc_heap::high_memory_load_th = 100 - available_mem_th;
34204 gc_heap::v_high_memory_load_th = 97;
34207 gc_heap::m_high_memory_load_th = min ((gc_heap::high_memory_load_th + 5), gc_heap::v_high_memory_load_th);
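// Worked example (illustrative): with 128 GB of physical memory and 16 procs,
// adjusted_available_mem_th = 3 + (int)(47.0f / 16) = 5, so available_mem_th
// becomes min(10, 5) = 5, giving high_memory_load_th = 95,
// v_high_memory_load_th = 97 and m_high_memory_load_th = min(95 + 5, 97) = 97.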
34209 gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0);
34212 gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent;
34215 WaitForGCEvent = new (nothrow) GCEvent;
34217 if (!WaitForGCEvent)
34219 return E_OUTOFMEMORY;
34222 if (!WaitForGCEvent->CreateManualEventNoThrow(TRUE))
34227 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
34228 #if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
34229 if (GCStress<cfg_any>::IsEnabled()) {
34230 for(int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++)
34232 m_StressObjs[i] = CreateGlobalHandle(0);
34234 m_CurStressObj = 0;
34236 #endif //STRESS_HEAP && !MULTIPLE_HEAPS
34237 #endif // FEATURE_REDHAWK
34239 initGCShadow(); // If we are debugging write barriers, initialize heap shadow
34241 #ifdef MULTIPLE_HEAPS
34243 for (unsigned i = 0; i < nhp; i++)
34245 GCHeap* Hp = new (nothrow) GCHeap();
34247 return E_OUTOFMEMORY;
34249 if ((hr = Hp->Init (i))!= S_OK)
34254 // initialize numa node to heap map
34255 heap_select::init_numa_node_to_heap_map(nhp);
34258 #endif //MULTIPLE_HEAPS
34262 GCScan::GcRuntimeStructuresValid (TRUE);
34264 GCToEEInterface::DiagUpdateGenerationBounds();
34271 // GC callback functions
34272 bool GCHeap::IsPromoted(Object* object)
34275 ((CObjectHeader*)object)->Validate();
34278 uint8_t* o = (uint8_t*)object;
34280 if (gc_heap::settings.condemned_generation == max_generation)
34282 #ifdef MULTIPLE_HEAPS
34283 gc_heap* hp = gc_heap::g_heaps[0];
34285 gc_heap* hp = pGenGCHeap;
34286 #endif //MULTIPLE_HEAPS
34288 #ifdef BACKGROUND_GC
34289 if (gc_heap::settings.concurrent)
34291 bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
34292 hp->background_marked (o));
34296 #endif //BACKGROUND_GC
34298 return (!((o < hp->highest_address) && (o >= hp->lowest_address))
34299 || hp->is_mark_set (o));
34304 gc_heap* hp = gc_heap::heap_of (o);
34305 return (!((o < hp->gc_high) && (o >= hp->gc_low))
34306 || hp->is_mark_set (o));
34310 size_t GCHeap::GetPromotedBytes(int heap_index)
34312 #ifdef BACKGROUND_GC
34313 if (gc_heap::settings.concurrent)
34315 return gc_heap::bpromoted_bytes (heap_index);
34318 #endif //BACKGROUND_GC
34320 return gc_heap::promoted_bytes (heap_index);
34324 void GCHeap::SetYieldProcessorScalingFactor (float scalingFactor)
34326 assert (yp_spin_count_unit != 0);
34327 int saved_yp_spin_count_unit = yp_spin_count_unit;
34328 yp_spin_count_unit = (int)((float)yp_spin_count_unit * scalingFactor / (float)9);
34330 // It's very suspicious if it becomes 0
34331 if (yp_spin_count_unit == 0)
34333 yp_spin_count_unit = saved_yp_spin_count_unit;
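// Worked example (illustrative, assuming a current unit of 1024): a
// scalingFactor of 4.5 gives (int)(1024 * 4.5f / 9) = 512, while a
// scalingFactor of 9 leaves the unit unchanged, so 9 is the neutral value in
// this formula.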
34337 unsigned int GCHeap::WhichGeneration (Object* object)
34339 gc_heap* hp = gc_heap::heap_of ((uint8_t*)object);
34340 unsigned int g = hp->object_gennum ((uint8_t*)object);
34341 dprintf (3, ("%Ix is in gen %d", (size_t)object, g));
34345 bool GCHeap::IsEphemeral (Object* object)
34347 uint8_t* o = (uint8_t*)object;
34348 gc_heap* hp = gc_heap::heap_of (o);
34349 return !!hp->ephemeral_pointer_p (o);
34352 // Return NULL if we can't find the next object. When the EE is not suspended,
34353 // the result is not accurate: if the input arg is in gen0, the function could
34354 // return zeroed-out memory as the next object
34355 Object * GCHeap::NextObj (Object * object)
34358 uint8_t* o = (uint8_t*)object;
34360 #ifndef FEATURE_BASICFREEZE
34361 if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address)))
34365 #endif //!FEATURE_BASICFREEZE
34367 heap_segment * hs = gc_heap::find_segment (o, FALSE);
34373 BOOL large_object_p = heap_segment_loh_p (hs);
34374 if (large_object_p)
34375 return NULL; //could be racing with another core allocating.
34376 #ifdef MULTIPLE_HEAPS
34377 gc_heap* hp = heap_segment_heap (hs);
34378 #else //MULTIPLE_HEAPS
34380 #endif //MULTIPLE_HEAPS
34381 unsigned int g = hp->object_gennum ((uint8_t*)object);
34382 if ((g == 0) && hp->settings.demotion)
34383 return NULL;//could be racing with another core allocating.
34384 int align_const = get_alignment_constant (!large_object_p);
34385 uint8_t* nextobj = o + Align (size (o), align_const);
34386 if (nextobj <= o) // either overflow or 0 sized object.
34391 if ((nextobj < heap_segment_mem(hs)) ||
34392 (nextobj >= heap_segment_allocated(hs) && hs != hp->ephemeral_heap_segment) ||
34393 (nextobj >= hp->alloc_allocated))
34398 return (Object *)nextobj;
34401 #endif // VERIFY_HEAP
34404 // returns TRUE if the pointer is in one of the GC heaps.
34405 bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
34407 // removed STATIC_CONTRACT_CAN_TAKE_LOCK here because find_segment
34408 // no longer calls GCEvent::Wait which eventually takes a lock.
34410 uint8_t* object = (uint8_t*) vpObject;
34411 #ifndef FEATURE_BASICFREEZE
34412 if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address)))
34414 #endif //!FEATURE_BASICFREEZE
34416 heap_segment * hs = gc_heap::find_segment (object, small_heap_only);
34420 #ifdef STRESS_PINNING
34421 static int n_promote = 0;
34422 #endif //STRESS_PINNING
34423 // promote an object
34424 void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
34426 THREAD_NUMBER_FROM_CONTEXT;
34427 #ifndef MULTIPLE_HEAPS
34428 const int thread = 0;
34429 #endif //!MULTIPLE_HEAPS
34431 uint8_t* o = (uint8_t*)*ppObject;
34436 #ifdef DEBUG_DestroyedHandleValue
34437 // we can race with destroy handle during concurrent scan
34438 if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
34440 #endif //DEBUG_DestroyedHandleValue
34444 gc_heap* hp = gc_heap::heap_of (o);
34446 dprintf (3, ("Promote %Ix", (size_t)o));
34448 #ifdef INTERIOR_POINTERS
34449 if (flags & GC_CALL_INTERIOR)
34451 if ((o < hp->gc_low) || (o >= hp->gc_high))
34455 if ( (o = hp->find_object (o, hp->gc_low)) == 0)
34461 #endif //INTERIOR_POINTERS
34463 #ifdef FEATURE_CONSERVATIVE_GC
34464 // For conservative GC, a value on the stack may point to the middle of a free object.
34465 // In this case, we don't need to promote the pointer.
34466 if (GCConfig::GetConservativeGC()
34467 && ((CObjectHeader*)o)->IsFree())
34474 ((CObjectHeader*)o)->ValidatePromote(sc, flags);
34476 UNREFERENCED_PARAMETER(sc);
34479 if (flags & GC_CALL_PINNED)
34480 hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
34482 #ifdef STRESS_PINNING
34483 if ((++n_promote % 20) == 1)
34484 hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
34485 #endif //STRESS_PINNING
34487 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
34488 size_t promoted_size_begin = hp->promoted_bytes (thread);
34489 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
34491 if ((o >= hp->gc_low) && (o < hp->gc_high))
34493 hpt->mark_object_simple (&o THREAD_NUMBER_ARG);
34496 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
34497 size_t promoted_size_end = hp->promoted_bytes (thread);
34498 if (g_fEnableAppDomainMonitoring)
34500 if (sc->pCurrentDomain)
34502 GCToEEInterface::RecordSurvivedBytesForHeap((promoted_size_end - promoted_size_begin), thread, sc->pCurrentDomain);
34505 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
34507 STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? header(o)->GetMethodTable() : NULL);
34510 void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
34513 UNREFERENCED_PARAMETER(sc);
34515 uint8_t* object = (uint8_t*)(Object*)(*ppObject);
34517 THREAD_NUMBER_FROM_CONTEXT;
34519 //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject));
34520 dprintf (3, ("R: %Ix", (size_t)ppObject));
34525 gc_heap* hp = gc_heap::heap_of (object);
34528 if (!(flags & GC_CALL_INTERIOR))
34530 // We cannot validate this object if it's in the condemned gen because it could
34531 // be one of the objects that were overwritten by an artificial gap due to a pinned plug.
34532 if (!((object >= hp->gc_low) && (object < hp->gc_high)))
34534 ((CObjectHeader*)object)->Validate(FALSE);
34539 dprintf (3, ("Relocate %Ix\n", (size_t)object));
34543 if ((flags & GC_CALL_INTERIOR) && gc_heap::settings.loh_compaction)
34545 if (!((object >= hp->gc_low) && (object < hp->gc_high)))
34550 if (gc_heap::loh_object_p (object))
34552 pheader = hp->find_object (object, 0);
34558 ptrdiff_t ref_offset = object - pheader;
34559 hp->relocate_address(&pheader THREAD_NUMBER_ARG);
34560 *ppObject = (Object*)(pheader + ref_offset);
34567 hp->relocate_address(&pheader THREAD_NUMBER_ARG);
34568 *ppObject = (Object*)pheader;
34571 STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0));
34574 /*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj)
34576 // For now we simply look at the size of the object to determine if it is in the
34577 // fixed heap or not. If the bit indicating this gets set at some point
34578 // we should key off that instead.
34579 return size( pObj ) >= loh_size_threshold;
34582 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
34585 void StressHeapDummy ();
34587 static int32_t GCStressStartCount = -1;
34588 static int32_t GCStressCurCount = 0;
34589 static int32_t GCStressStartAtJit = -1;
34591 // the maximum number of foreground GCs we'll induce during one BGC
34592 // (this number does not include "naturally" occurring GCs).
34593 static int32_t GCStressMaxFGCsPerBGC = -1;
34595 // The CLRRandom implementation can produce FPU exceptions if
34596 // the test/application run by the CLR enables any FPU exceptions.
34597 // We want to avoid any unexpected exception coming from stress
34598 // infrastructure, so CLRRandom is not an option.
34599 // The code below is a replica of the CRT rand() implementation.
34600 // Using CRT rand() is not an option because we would interfere with the user application
34601 // that may also use it.
34602 int StressRNG(int iMaxValue)
34604 static BOOL bisRandInit = FALSE;
34605 static int lHoldrand = 1L;
34609 lHoldrand = (int)time(NULL);
34610 bisRandInit = TRUE;
34612 int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff);
34613 return randValue % iMaxValue;
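// Worked example (illustrative): with the CRT constants above, a seed of
// lHoldrand = 1 advances to 1 * 214013 + 2531011 = 2745024, and for
// iMaxValue = 100 the function returns ((2745024 >> 16) & 0x7fff) % 100 =
// 41 % 100 = 41.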
34615 #endif // STRESS_HEAP
34616 #endif // !FEATURE_REDHAWK
34618 // free up an object so that things will move, and then do a GC
34619 // return TRUE if a GC actually happens, otherwise FALSE
34620 bool GCHeap::StressHeap(gc_alloc_context * context)
34622 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
34623 alloc_context* acontext = static_cast<alloc_context*>(context);
34624 assert(context != nullptr);
34626 // if GC stress was dynamically disabled during this run we return FALSE
34627 if (!GCStressPolicy::IsEnabled())
34631 if (g_pConfig->FastGCStressLevel() && !GCToEEInterface::GetThread()->StressHeapIsEnabled()) {
34637 if ((g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_UNIQUE)
34639 || g_pConfig->FastGCStressLevel() > 1
34642 if (!Thread::UniqueStack(&acontext)) {
34647 #ifdef BACKGROUND_GC
34648 // don't trigger a GC from the GC threads but still trigger GCs from user threads.
34649 if (GCToEEInterface::WasCurrentThreadCreatedByGC())
34653 #endif //BACKGROUND_GC
34655 if (GCStressStartAtJit == -1 || GCStressStartCount == -1)
34657 GCStressStartCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStressStart);
34658 GCStressStartAtJit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStartAtJit);
34661 if (GCStressMaxFGCsPerBGC == -1)
34663 GCStressMaxFGCsPerBGC = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMaxFGCsPerBGC);
34664 if (g_pConfig->IsGCStressMix() && GCStressMaxFGCsPerBGC == -1)
34665 GCStressMaxFGCsPerBGC = 6;
34669 if (g_JitCount < GCStressStartAtJit)
34673 // Allow programmer to skip the first N Stress GCs so that you can
34674 // get to the interesting ones faster.
34675 Interlocked::Increment(&GCStressCurCount);
34676 if (GCStressCurCount < GCStressStartCount)
34679 // throttle the number of stress-induced GCs by a factor given by GCStressStep
34680 if ((GCStressCurCount % g_pConfig->GetGCStressStep()) != 0)
34685 #ifdef BACKGROUND_GC
34686 if (IsConcurrentGCEnabled() && IsConcurrentGCInProgress())
34688 // allow a maximum number of stress induced FGCs during one BGC
34689 if (gc_stress_fgcs_in_bgc >= GCStressMaxFGCsPerBGC)
34691 ++gc_stress_fgcs_in_bgc;
34693 #endif // BACKGROUND_GC
34695 if (g_pStringClass == 0)
34697 // If the String class has not been loaded, don't do any stressing. This should
34698 // be kept to a minimum to get as complete coverage as possible.
34699 _ASSERTE(g_fEEInit);
34703 #ifndef MULTIPLE_HEAPS
34704 static int32_t OneAtATime = -1;
34706 // Only bother with this if the stress level is big enough and if nobody else is
34707 // doing it right now. Note that some callers are inside the AllocLock and are
34708 // guaranteed synchronized. But others are using AllocationContexts and have no
34709 // particular synchronization.
34711 // For this latter case, we want a very high-speed way of limiting this to one
34712 // at a time. A secondary advantage is that we release part of our StressObjs
34713 // buffer sparingly but just as effectively.
34715 if (Interlocked::Increment(&OneAtATime) == 0 &&
34716 !TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
34720 // If the current string is used up
34721 if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0)
34723 // Populate handles with strings
34724 int i = m_CurStressObj;
34725 while(HndFetchHandle(m_StressObjs[i]) == 0)
34727 _ASSERTE(m_StressObjs[i] != 0);
34728 unsigned strLen = ((unsigned)loh_size_threshold - 32) / sizeof(WCHAR);
34729 unsigned strSize = PtrAlign(StringObject::GetSize(strLen));
34731 // update the cached type handle before allocating
34732 SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
34733 str = (StringObject*) pGenGCHeap->allocate (strSize, acontext);
34736 str->SetMethodTable (g_pStringClass);
34737 str->SetStringLength (strLen);
34738 HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
34740 i = (i + 1) % NUM_HEAP_STRESS_OBJS;
34741 if (i == m_CurStressObj) break;
34744 // advance the current handle to the next string
34745 m_CurStressObj = (m_CurStressObj + 1) % NUM_HEAP_STRESS_OBJS;
34748 // Get the current string
34749 str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj]));
34752 // Chop off the end of the string and form a new object out of it.
34753 // This will 'free' an object at the beginning of the heap, which will
34754 // force data movement. Note that we can only do this so many times
34755 // before we have to move on to the next string.
34756 unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31);
34757 if (str->GetStringLength() > sizeOfNewObj / sizeof(WCHAR))
34759 unsigned sizeToNextObj = (unsigned)Align(size(str));
34760 uint8_t* freeObj = ((uint8_t*) str) + sizeToNextObj - sizeOfNewObj;
34761 pGenGCHeap->make_unused_array (freeObj, sizeOfNewObj);
34762 str->SetStringLength(str->GetStringLength() - (sizeOfNewObj / sizeof(WCHAR)));
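// Worked example (illustrative, assuming a 64-bit min_obj_size of 24 bytes):
// sizeOfNewObj = Align(24 * 31) = 744, so each call carves a 744-byte free
// object out of the tail of the string and shortens it by 744 / sizeof(WCHAR)
// = 372 characters, until the string is too short to chop again.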
34766 // Let the string itself become garbage;
34767 // it will be reallocated next time around
34768 HndAssignHandle(m_StressObjs[m_CurStressObj], 0);
34772 Interlocked::Decrement(&OneAtATime);
34773 #endif // !MULTIPLE_HEAPS
34774 if (IsConcurrentGCEnabled())
34776 int rgen = StressRNG(10);
34778 // gen0:gen1:gen2 distribution: 40:40:20
34781 else if (rgen >= 4)
34786 GarbageCollectTry (rgen, FALSE, collection_gcstress);
34790 GarbageCollect(max_generation, FALSE, collection_gcstress);
34795 UNREFERENCED_PARAMETER(context);
34797 #endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
34801 #ifdef FEATURE_PREMORTEM_FINALIZATION
34802 #define REGISTER_FOR_FINALIZATION(_object, _size) \
34803 hp->finalize_queue->RegisterForFinalization (0, (_object), (_size))
34804 #else // FEATURE_PREMORTEM_FINALIZATION
34805 #define REGISTER_FOR_FINALIZATION(_object, _size) true
34806 #endif // FEATURE_PREMORTEM_FINALIZATION
34808 #define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do { \
34809 if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size))) \
34811 STRESS_LOG_OOM_STACK(_size); \
34817 // Small Object Allocator
34820 // Allocate a small object with an alignment requirement of 8 bytes.
34822 GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
34824 #ifdef FEATURE_64BIT_ALIGNMENT
34830 alloc_context* acontext = static_cast<alloc_context*>(ctx);
34832 #ifdef MULTIPLE_HEAPS
34833 if (acontext->get_alloc_heap() == 0)
34835 AssignHeap (acontext);
34836 assert (acontext->get_alloc_heap());
34839 gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
34841 gc_heap* hp = pGenGCHeap;
34842 #endif //MULTIPLE_HEAPS
34844 return AllocAlign8Common(hp, acontext, size, flags);
34846 UNREFERENCED_PARAMETER(ctx);
34847 UNREFERENCED_PARAMETER(size);
34848 UNREFERENCED_PARAMETER(flags);
34849 assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
34851 #endif //FEATURE_64BIT_ALIGNMENT
34854 // Common code used by both variants of AllocAlign8 above.
34856 GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
34858 #ifdef FEATURE_64BIT_ALIGNMENT
34864 gc_heap* hp = (gc_heap*)_hp;
34868 Object* newAlloc = NULL;
34871 #ifdef COUNT_CYCLES
34872 AllocStart = GetCycleCount32();
34874 #elif defined(ENABLE_INSTRUMENTATION)
34875 unsigned AllocStart = GetInstLogTime();
34877 #endif //COUNT_CYCLES
34880 if (size < loh_size_threshold)
34886 // Depending on where in the object the payload requiring 8-byte alignment resides we might have to
34887 // align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned
34888 // case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag.
34889 size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0;
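// Illustrative example (an added note): on a 32-bit runtime a boxed Int64 has
// a 4-byte method table pointer followed by the 8-byte payload, so for the
// payload to be 8-aligned the object itself must start at an address that is
// 4 mod 8; the caller passes GC_ALLOC_ALIGN8_BIAS for that case and
// desiredAlignment becomes 4.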
34891 // Retrieve the address of the next allocation from the context (note that we're inside the alloc
34892 // lock at this point).
34893 uint8_t* result = acontext->alloc_ptr;
34895 // Will an allocation at this point yield the correct alignment and fit into the remainder of the
34897 if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
34899 // Yes, we can just go ahead and make the allocation.
34900 newAlloc = (Object*) hp->allocate (size, acontext);
34901 ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
34905 // No, either the next available address is not aligned in the way we require it or there's
34906 // not enough space to allocate an object of the required size. In both cases we allocate a
34907 // padding object (marked as a free object). This object's size is such that it will reverse
34908 // the alignment of the next header (asserted below).
34910 // We allocate both together then decide based on the result whether we'll format the space as
34911 // free object + real object or real object + free object.
34912 ASSERT((Align(min_obj_size) & 7) == 4);
34913 CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext);
34916 if (((size_t)freeobj & 7) == desiredAlignment)
34918 // New allocation has desired alignment, return this one and place the free object at the
34919 // end of the allocated space.
34920 newAlloc = (Object*)freeobj;
34921 freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size));
34925 // New allocation is still mis-aligned, format the initial space as a free object and the
34926 // rest of the space should be correctly aligned for the real object.
34927 newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
34928 ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
34930 freeobj->SetFree(min_obj_size);
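// Worked example (illustrative, assuming a 32-bit Align(min_obj_size) of 12):
// 12 & 7 == 4, so a minimal free object always flips the 8-byte alignment of
// whatever follows it. If the combined allocation comes back 8-aligned but
// desiredAlignment is 4, the first 12 bytes become the free object and the
// real object starts at offset 12, i.e. at 4 mod 8 as required.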
34936 // The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
34937 // support mis-aligned object headers so we can't support biased headers as above. Luckily for us
34938 // we've managed to arrange things so the only case where we see a bias is for boxed value types and
34939 // these can never get large enough to be allocated on the LOH.
34940 ASSERT(65536 < loh_size_threshold);
34941 ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);
34943 alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
34945 newAlloc = (Object*) hp->allocate_large_object (size, acontext->alloc_bytes_loh);
34946 ASSERT(((size_t)newAlloc & 7) == 0);
34949 CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
34952 #ifdef COUNT_CYCLES
34953 finish = GetCycleCount32();
34954 #elif defined(ENABLE_INSTRUMENTATION)
34955 finish = GetInstLogTime();
34956 #endif //COUNT_CYCLES
34957 AllocDuration += finish - AllocStart;
34962 UNREFERENCED_PARAMETER(_hp);
34963 UNREFERENCED_PARAMETER(acontext);
34964 UNREFERENCED_PARAMETER(size);
34965 UNREFERENCED_PARAMETER(flags);
34966 assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
34968 #endif // FEATURE_64BIT_ALIGNMENT
34972 GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
34981 Object* newAlloc = NULL;
34984 #ifdef COUNT_CYCLES
34985 AllocStart = GetCycleCount32();
34987 #elif defined(ENABLE_INSTRUMENTATION)
34988 unsigned AllocStart = GetInstLogTime();
34990 #endif //COUNT_CYCLES
34993 #ifdef MULTIPLE_HEAPS
34994 //take the first heap....
34995 gc_heap* hp = gc_heap::g_heaps[0];
34997 gc_heap* hp = pGenGCHeap;
34999 // prefix complains about us dereferencing hp in wks build even though we only access static members
35000 // this way. not sure how to shut it up except for this ugly workaround:
35001 PREFIX_ASSUME(hp != NULL);
35003 #endif //MULTIPLE_HEAPS
35005 alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));
35007 newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
35008 #ifdef FEATURE_STRUCTALIGN
35009 newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
35010 #endif // FEATURE_STRUCTALIGN
35011 CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
35014 #ifdef COUNT_CYCLES
35015 finish = GetCycleCount32();
35016 #elif defined(ENABLE_INSTRUMENTATION)
35017 finish = GetInstLogTime();
35018 #endif //COUNT_CYCLES
35019 AllocDuration += finish - AllocStart;
35026 GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
35035 Object* newAlloc = NULL;
35036 alloc_context* acontext = static_cast<alloc_context*>(context);
35039 #ifdef COUNT_CYCLES
35040 AllocStart = GetCycleCount32();
35042 #elif defined(ENABLE_INSTRUMENTATION)
35043 unsigned AllocStart = GetInstLogTime();
35045 #endif //COUNT_CYCLES
35048 #ifdef MULTIPLE_HEAPS
35049 if (acontext->get_alloc_heap() == 0)
35051 AssignHeap (acontext);
35052 assert (acontext->get_alloc_heap());
35054 #endif //MULTIPLE_HEAPS
35056 #ifdef MULTIPLE_HEAPS
35057 gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
35059 gc_heap* hp = pGenGCHeap;
35061 // prefix complains about us dereferencing hp in wks build even though we only access static members
35062 // this way. not sure how to shut it up except for this ugly workaround:
35063 PREFIX_ASSUME(hp != NULL);
35065 #endif //MULTIPLE_HEAPS
35067 if (size < loh_size_threshold)
35073 newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext);
35074 #ifdef FEATURE_STRUCTALIGN
35075 newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
35076 #endif // FEATURE_STRUCTALIGN
35077 // ASSERT (newAlloc);
35081 newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), acontext->alloc_bytes_loh);
35082 #ifdef FEATURE_STRUCTALIGN
35083 newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
35084 #endif // FEATURE_STRUCTALIGN
35087 CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
35090 #ifdef COUNT_CYCLES
35091 finish = GetCycleCount32();
35092 #elif defined(ENABLE_INSTRUMENTATION)
35093 finish = GetInstLogTime();
35094 #endif //COUNT_CYCLES
35095 AllocDuration += finish - AllocStart;
35102 GCHeap::FixAllocContext (gc_alloc_context* context, void* arg, void *heap)
35104 alloc_context* acontext = static_cast<alloc_context*>(context);
35105 #ifdef MULTIPLE_HEAPS
35108 acontext->alloc_count = 0;
35110 uint8_t * alloc_ptr = acontext->alloc_ptr;
35115 // The acontext->alloc_heap can be out of sync with the ptrs because
35116 // of heap re-assignment in allocate
35117 gc_heap* hp = gc_heap::heap_of (alloc_ptr);
35119 gc_heap* hp = pGenGCHeap;
35120 #endif //MULTIPLE_HEAPS
35122 if (heap == NULL || heap == hp)
35124 hp->fix_allocation_context (acontext, ((arg != 0)? TRUE : FALSE),
35125 get_alignment_constant(TRUE));
35130 GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly)
35132 uint8_t *o = (uint8_t*)pInteriorPtr;
35134 gc_heap* hp = gc_heap::heap_of (o);
35136 uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address);
35137 uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address);
35139 if (o >= lowest && o < highest)
35141 o = hp->find_object (o, lowest);
35148 return (Object *)o;
35151 BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p)
35153 if (dd_new_allocation (dd) < 0)
35158 if (((float)(dd_new_allocation (dd)) / (float)dd_desired_allocation (dd)) < (low_memory_p ? 0.7 : 0.3))
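// Worked example (illustrative): if dd_desired_allocation is 100 MB and
// dd_new_allocation has 25 MB left, the remaining fraction is 0.25 < 0.3, so
// the generation is considered worth collecting. Under low memory pressure
// the threshold rises to 0.7, so even 60 MB remaining would qualify.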
35166 //----------------------------------------------------------------------------
35167 // #GarbageCollector
35169 // API to ensure that a complete new garbage collection takes place
35172 GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
35177 size_t total_allocated = 0;
35178 size_t total_desired = 0;
35179 #ifdef MULTIPLE_HEAPS
35181 for (hn = 0; hn < gc_heap::n_heaps; hn++)
35183 gc_heap* hp = gc_heap::g_heaps [hn];
35184 total_desired += dd_desired_allocation (hp->dynamic_data_of (0));
35185 total_allocated += dd_desired_allocation (hp->dynamic_data_of (0))-
35186 dd_new_allocation (hp->dynamic_data_of (0));
35189 gc_heap* hp = pGenGCHeap;
35190 total_desired = dd_desired_allocation (hp->dynamic_data_of (0));
35191 total_allocated = dd_desired_allocation (hp->dynamic_data_of (0))-
35192 dd_new_allocation (hp->dynamic_data_of (0));
35193 #endif //MULTIPLE_HEAPS
35195 if ((total_desired > gc_heap::mem_one_percent) && (total_allocated < gc_heap::mem_one_percent))
35197 dprintf (2, ("Async low mem but we've only allocated %d (< 1%% of physical mem) out of %d, returning",
35198 total_allocated, total_desired));
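// Worked example (illustrative): with 16 GB of physical memory,
// mem_one_percent is about 160 MB (divided further by the processor count on
// workstation GC). An induced low-memory GC is skipped when the budget is
// large (total_desired > mem_one_percent) but the app has actually allocated
// less than mem_one_percent since the last GC, because collecting would
// reclaim very little.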
35205 #ifdef MULTIPLE_HEAPS
35206 gc_heap* hpt = gc_heap::g_heaps[0];
35209 #endif //MULTIPLE_HEAPS
35211 generation = (generation < 0) ? max_generation : min (generation, max_generation);
35212 dynamic_data* dd = hpt->dynamic_data_of (generation);
35214 #ifdef BACKGROUND_GC
35215 if (recursive_gc_sync::background_running_p())
35217 if ((mode == collection_optimized) || (mode & collection_non_blocking))
35221 if (mode & collection_blocking)
35223 pGenGCHeap->background_gc_wait();
35224 if (mode & collection_optimized)
35230 #endif //BACKGROUND_GC
35232 if (mode & collection_optimized)
35234 if (pGenGCHeap->gc_started)
35240 BOOL should_collect = FALSE;
35241 BOOL should_check_loh = (generation == max_generation);
35242 #ifdef MULTIPLE_HEAPS
35243 for (int i = 0; i < gc_heap::n_heaps; i++)
35245 dynamic_data* dd1 = gc_heap::g_heaps [i]->dynamic_data_of (generation);
35246 dynamic_data* dd2 = (should_check_loh ?
35247 (gc_heap::g_heaps [i]->dynamic_data_of (max_generation + 1)) :
35250 if (should_collect_optimized (dd1, low_memory_p))
35252 should_collect = TRUE;
35255 if (dd2 && should_collect_optimized (dd2, low_memory_p))
35257 should_collect = TRUE;
35262 should_collect = should_collect_optimized (dd, low_memory_p);
35263 if (!should_collect && should_check_loh)
35266 should_collect_optimized (hpt->dynamic_data_of (max_generation + 1), low_memory_p);
35268 #endif //MULTIPLE_HEAPS
35269 if (!should_collect)
35276 size_t CollectionCountAtEntry = dd_collection_count (dd);
35277 size_t BlockingCollectionCountAtEntry = gc_heap::full_gc_counts[gc_type_blocking];
35278 size_t CurrentCollectionCount = 0;
35282 CurrentCollectionCount = GarbageCollectTry(generation, low_memory_p, mode);
35284 if ((mode & collection_blocking) &&
35285 (generation == max_generation) &&
35286 (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry))
35288 #ifdef BACKGROUND_GC
35289 if (recursive_gc_sync::background_running_p())
35291 pGenGCHeap->background_gc_wait();
35293 #endif //BACKGROUND_GC
35298 if (CollectionCountAtEntry == CurrentCollectionCount)
35307 GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode)
35309 int gen = (generation < 0) ?
35310 max_generation : min (generation, max_generation);
35312 gc_reason reason = reason_empty;
35316 if (mode & collection_blocking)
35318 reason = reason_lowmemory_blocking;
35322 reason = reason_lowmemory;
35327 reason = reason_induced;
35330 if (reason == reason_induced)
35332 if (mode & collection_compacting)
35334 reason = reason_induced_compacting;
35336 else if (mode & collection_non_blocking)
35338 reason = reason_induced_noforce;
35341 else if (mode & collection_gcstress)
35343 reason = reason_gcstress;
35348 return GarbageCollectGeneration (gen, reason);
35351 void gc_heap::do_pre_gc()
35353 STRESS_LOG_GC_STACK;
35356 STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index),
35357 (uint32_t)settings.condemned_generation,
35358 (uint32_t)settings.reason);
35359 #endif // STRESS_LOG
35361 #ifdef MULTIPLE_HEAPS
35362 gc_heap* hp = g_heaps[0];
35365 #endif //MULTIPLE_HEAPS
35367 #ifdef BACKGROUND_GC
35368 settings.b_state = hp->current_bgc_state;
35369 #endif //BACKGROUND_GC
35372 size_t total_allocated_since_last_gc = get_total_allocated_since_last_gc();
35373 #ifdef BACKGROUND_GC
35374 dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)(%s)(%d)",
35375 VolatileLoad(&settings.gc_index),
35376 dd_collection_count (hp->dynamic_data_of (0)),
35377 settings.condemned_generation,
35378 total_allocated_since_last_gc,
35379 (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
35380 settings.b_state));
35382 dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)",
35383 VolatileLoad(&settings.gc_index),
35384 dd_collection_count(hp->dynamic_data_of(0)),
35385 settings.condemned_generation,
35386 total_allocated_since_last_gc));
35387 #endif //BACKGROUND_GC
35389 if (heap_hard_limit)
35391 size_t total_heap_committed = get_total_committed_size();
35392 size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping;
35393 dprintf (1, ("(%d)GC commit BEG #%Id: %Id (recorded: %Id)",
35394 settings.condemned_generation,
35395 (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded));
35399 // TODO: this can happen...it's because of the way we are calling
35400 // do_pre_gc, will fix later.
35401 //if (last_gc_index > VolatileLoad(&settings.gc_index))
35403 // FATAL_GC_ERROR();
35406 last_gc_index = VolatileLoad(&settings.gc_index);
35407 GCHeap::UpdatePreGCCounters();
35408 #if defined(__linux__)
35409 GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)),
35410 static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)),
35411 static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)),
35412 static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private)));
35413 #endif // __linux__
35415 if (settings.concurrent)
35417 #ifdef BACKGROUND_GC
35418 full_gc_counts[gc_type_background]++;
35419 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
35420 GCHeap::gc_stress_fgcs_in_bgc = 0;
35421 #endif // STRESS_HEAP && !FEATURE_REDHAWK
35422 #endif // BACKGROUND_GC
35426 if (settings.condemned_generation == max_generation)
35428 full_gc_counts[gc_type_blocking]++;
35432 #ifdef BACKGROUND_GC
35433 if (settings.background_p)
35435 ephemeral_fgc_counts[settings.condemned_generation]++;
35437 #endif //BACKGROUND_GC
35441 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
35442 if (g_fEnableAppDomainMonitoring)
35444 GCToEEInterface::ResetTotalSurvivedBytes();
35446 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
35449 #ifdef GC_CONFIG_DRIVEN
35450 void gc_heap::record_interesting_info_per_heap()
35452 // datapoints are always from the last blocking GC so don't record again
35454 if (!(settings.concurrent))
35456 for (int i = 0; i < max_idp_count; i++)
35458 interesting_data_per_heap[i] += interesting_data_per_gc[i];
35462 int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
35463 if (compact_reason >= 0)
35464 (compact_reasons_per_heap[compact_reason])++;
35465 int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
35466 if (expand_mechanism >= 0)
35467 (expand_mechanisms_per_heap[expand_mechanism])++;
35469 for (int i = 0; i < max_gc_mechanism_bits_count; i++)
35471 if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i))
35472 (interesting_mechanism_bits_per_heap[i])++;
35475 // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
35476 cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |",
35478 (size_t)settings.gc_index,
35479 settings.condemned_generation,
35480 // TEMP - I am just doing this for wks GC 'cause I wanna see the pattern of doing C/S GCs.
35481 (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction
35482 ((expand_mechanism >= 0)? "X" : ""), // EX
35483 ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF
35484 ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF
35485 (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML
35486 (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM
35487 interesting_data_per_gc[idp_pre_short],
35488 interesting_data_per_gc[idp_post_short],
35489 interesting_data_per_gc[idp_merged_pin],
35490 interesting_data_per_gc[idp_converted_pin],
35491 interesting_data_per_gc[idp_pre_pin],
35492 interesting_data_per_gc[idp_post_pin],
35493 interesting_data_per_gc[idp_pre_and_post_pin],
35494 interesting_data_per_gc[idp_pre_short_padded],
35495 interesting_data_per_gc[idp_post_short_padded]));
35498 void gc_heap::record_global_mechanisms()
35500 for (int i = 0; i < max_global_mechanisms_count; i++)
35502 if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i))
35504 ::record_global_mechanism (i);
35509 BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p)
35511 if (!compact_ratio)
35512 return (!compact_p);
35514 size_t compact_count = compact_or_sweep_gcs[0];
35515 size_t sweep_count = compact_or_sweep_gcs[1];
35517 size_t total_count = compact_count + sweep_count;
35518 BOOL should_compact = compact_p;
35519 if (total_count > 3)
35523 int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1));
35524 if (temp_ratio > compact_ratio)
35526 // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n",
35527 // (compact_count + 1), (total_count + 1), temp_ratio));
35528 should_compact = FALSE;
35533 int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1));
35534 if (temp_ratio > (100 - compact_ratio))
35536 // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n",
35537 // (sweep_count + 1), (total_count + 1), temp_ratio));
35538 should_compact = TRUE;
35543 return !should_compact;
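// Worked example (illustrative): with compact_ratio = 50 (target ~50% compacting GCs),
// compact_count = 7 and sweep_count = 2, compacting again would project a ratio of
// (7 + 1) * 100 / (9 + 1) = 80% > 50, so the decision flips to sweeping; symmetrically,
// enough consecutive sweeps push the sweep ratio over (100 - compact_ratio) and force a
// compaction. The +1 terms project the ratio as if the candidate GC had already happened.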
35545 #endif //GC_CONFIG_DRIVEN
35547 bool gc_heap::is_pm_ratio_exceeded()
35549 size_t maxgen_frag = 0;
35550 size_t maxgen_size = 0;
35551 size_t total_heap_size = get_total_heap_size();
35553 #ifdef MULTIPLE_HEAPS
35554 for (int i = 0; i < gc_heap::n_heaps; i++)
35556 gc_heap* hp = gc_heap::g_heaps[i];
35557 #else //MULTIPLE_HEAPS
35559 gc_heap* hp = pGenGCHeap;
35560 #endif //MULTIPLE_HEAPS
35562 maxgen_frag += dd_fragmentation (hp->dynamic_data_of (max_generation));
35563 maxgen_size += hp->generation_size (max_generation);
35566 double maxgen_ratio = (double)maxgen_size / (double)total_heap_size;
35567 double maxgen_frag_ratio = (double)maxgen_frag / (double)maxgen_size;
35568 dprintf (GTC_LOG, ("maxgen %Id(%d%% total heap), frag: %Id (%d%% maxgen)",
35569 maxgen_size, (int)(maxgen_ratio * 100.0),
35570 maxgen_frag, (int)(maxgen_frag_ratio * 100.0)));
35572 bool maxgen_highfrag_p = ((maxgen_ratio > 0.5) && (maxgen_frag_ratio > 0.1));
35574 // We need to adjust elevation here because if there's enough fragmentation it's not productive to keep elevation locked.
35576 if (maxgen_highfrag_p)
35578 settings.should_lock_elevation = FALSE;
35579 dprintf (GTC_LOG, ("high frag gen2, turn off elevation"));
35582 return maxgen_highfrag_p;
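// Worked example (illustrative): with a 1000MB total heap, a 600MB gen2 gives
// maxgen_ratio 0.6 (> 0.5); 80MB of gen2 fragmentation gives maxgen_frag_ratio ~0.13
// (> 0.1), so the ratio is reported as exceeded. A small gen2, however fragmented,
// never trips this check because of the 0.5 size gate.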
35585 void gc_heap::do_post_gc()
35587 if (!settings.concurrent)
35593 #ifdef COUNT_CYCLES
35594 AllocStart = GetCycleCount32();
35596 AllocStart = clock();
35597 #endif //COUNT_CYCLES
35600 #ifdef MULTIPLE_HEAPS
35601 gc_heap* hp = g_heaps[0];
35604 #endif //MULTIPLE_HEAPS
35606 GCToEEInterface::GcDone(settings.condemned_generation);
35608 GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
35609 (uint32_t)settings.condemned_generation,
35610 (uint32_t)settings.reason,
35611 !!settings.concurrent);
35613 //dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)",
35614 dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)",
35615 VolatileLoad(&settings.gc_index),
35616 dd_collection_count(hp->dynamic_data_of(0)),
35617 settings.condemned_generation,
35618 (settings.concurrent ? "BGC" : "GC")));
35620 if (settings.exit_memory_load != 0)
35621 last_gc_memory_load = settings.exit_memory_load;
35622 else if (settings.entry_memory_load != 0)
35623 last_gc_memory_load = settings.entry_memory_load;
35625 last_gc_heap_size = get_total_heap_size();
35626 last_gc_fragmentation = get_total_fragmentation();
35629 if (heap_hard_limit)
35631 size_t total_heap_committed = get_total_committed_size();
35632 size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping;
35633 dprintf (1, ("(%d)GC commit END #%Id: %Id (recorded: %Id), heap %Id, frag: %Id",
35634 settings.condemned_generation,
35635 (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded,
35636 last_gc_heap_size, last_gc_fragmentation));
35640 // Note we only do this at the end of full blocking GCs because we do not want
35641 // to turn on this provisional mode during the middle of a BGC.
35642 if ((settings.condemned_generation == max_generation) && (!settings.concurrent))
35646 size_t full_compacting_gc_count = full_gc_counts[gc_type_compacting];
35647 if (provisional_mode_triggered)
35649 uint64_t r = gc_rand::get_rand(10);
35650 if ((full_compacting_gc_count - provisional_triggered_gc_count) >= r)
35652 provisional_mode_triggered = false;
35653 provisional_off_gc_count = full_compacting_gc_count;
35654 dprintf (GTC_LOG, ("%Id NGC2s when turned on, %Id NGCs since(%Id)",
35655 provisional_triggered_gc_count, (full_compacting_gc_count - provisional_triggered_gc_count),
35656 num_provisional_triggered));
35661 uint64_t r = gc_rand::get_rand(5);
35662 if ((full_compacting_gc_count - provisional_off_gc_count) >= r)
35664 provisional_mode_triggered = true;
35665 provisional_triggered_gc_count = full_compacting_gc_count;
35666 num_provisional_triggered++;
35667 dprintf (GTC_LOG, ("%Id NGC2s when turned off, %Id NGCs since(%Id)",
35668 provisional_off_gc_count, (full_compacting_gc_count - provisional_off_gc_count),
35669 num_provisional_triggered));
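// The random draws above give the stress flavor of provisional mode a cheap hysteresis:
// once PM is on it only turns off after get_rand(10) (a value in 0..9) further full
// compacting GCs, and once off it only turns back on after get_rand(5) (0..4) of them,
// so the mode does not flap on every full GC. The normal policy below switches PM based
// on entry memory load and the gen2 size/fragmentation ratios instead.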
35675 if (provisional_mode_triggered)
35677 if ((settings.entry_memory_load < high_memory_load_th) ||
35678 !is_pm_ratio_exceeded())
35680 dprintf (GTC_LOG, ("turning off PM"));
35681 provisional_mode_triggered = false;
35684 else if ((settings.entry_memory_load >= high_memory_load_th) && is_pm_ratio_exceeded())
35686 dprintf (GTC_LOG, ("highmem && highfrag - turning on PM"));
35687 provisional_mode_triggered = true;
35688 num_provisional_triggered++;
35693 GCHeap::UpdatePostGCCounters();
35694 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
35695 //if (g_fEnableARM)
35697 // SystemDomain::GetADSurvivedBytes();
35699 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
35702 STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index),
35703 (uint32_t)settings.condemned_generation,
35704 (uint32_t)settings.reason);
35705 #endif // STRESS_LOG
35707 #ifdef GC_CONFIG_DRIVEN
35708 if (!settings.concurrent)
35710 if (settings.compaction)
35711 (compact_or_sweep_gcs[0])++;
35713 (compact_or_sweep_gcs[1])++;
35716 #ifdef MULTIPLE_HEAPS
35717 for (int i = 0; i < n_heaps; i++)
35718 g_heaps[i]->record_interesting_info_per_heap();
35720 record_interesting_info_per_heap();
35721 #endif //MULTIPLE_HEAPS
35722 record_global_mechanisms();
35723 #endif //GC_CONFIG_DRIVEN
35726 unsigned GCHeap::GetGcCount()
35728 return (unsigned int)VolatileLoad(&pGenGCHeap->settings.gc_index);
35732 GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
35734 dprintf (2, ("triggered a GC!"));
35736 #ifdef MULTIPLE_HEAPS
35737 gc_heap* hpt = gc_heap::g_heaps[0];
35740 #endif //MULTIPLE_HEAPS
35741 bool cooperative_mode = true;
35742 dynamic_data* dd = hpt->dynamic_data_of (gen);
35743 size_t localCount = dd_collection_count (dd);
35745 enter_spin_lock (&gc_heap::gc_lock);
35746 dprintf (SPINLOCK_LOG, ("GC Egc"));
35747 ASSERT_HOLDING_SPIN_LOCK(&gc_heap::gc_lock);
35749 //don't trigger another GC if one was already in progress
35750 //while waiting for the lock
35752 size_t col_count = dd_collection_count (dd);
35754 if (localCount != col_count)
35756 #ifdef SYNCHRONIZATION_STATS
35757 gc_lock_contended++;
35758 #endif //SYNCHRONIZATION_STATS
35759 dprintf (SPINLOCK_LOG, ("no need GC Lgc"));
35760 leave_spin_lock (&gc_heap::gc_lock);
35762 // We don't need to release the msl here because this means a GC
35763 // has happened and would have released all msl's.
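// Example of the race handled here: threads T1 and T2 both fail an allocation and call
// GarbageCollectGeneration. T1 wins gc_lock and runs the GC; by the time T2 acquires the
// lock, dd_collection_count has advanced past the localCount T2 cached above, so T2
// returns without triggering a second, back-to-back collection.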
35768 #ifdef COUNT_CYCLES
35769 int gc_start = GetCycleCount32();
35770 #endif //COUNT_CYCLES
35773 #ifdef COUNT_CYCLES
35774 AllocDuration += GetCycleCount32() - AllocStart;
35776 AllocDuration += clock() - AllocStart;
35777 #endif //COUNT_CYCLES
35780 gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
35781 (reason == reason_lowmemory_blocking) ||
35782 (gc_heap::latency_level == latency_level_memory_footprint);
35784 gc_trigger_reason = reason;
35786 #ifdef MULTIPLE_HEAPS
35787 for (int i = 0; i < gc_heap::n_heaps; i++)
35789 gc_heap::g_heaps[i]->reset_gc_done();
35792 gc_heap::reset_gc_done();
35793 #endif //MULTIPLE_HEAPS
35795 gc_heap::gc_started = TRUE;
35798 init_sync_log_stats();
35800 #ifndef MULTIPLE_HEAPS
35801 cooperative_mode = gc_heap::enable_preemptive ();
35803 dprintf (2, ("Suspending EE"));
35804 BEGIN_TIMING(suspend_ee_during_log);
35805 GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
35806 END_TIMING(suspend_ee_during_log);
35807 gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc();
35808 gc_heap::disable_preemptive (cooperative_mode);
35809 if (gc_heap::proceed_with_gc_p)
35810 pGenGCHeap->settings.init_mechanisms();
35812 gc_heap::update_collection_counts_for_no_gc();
35814 #endif //!MULTIPLE_HEAPS
35817 // MAP_EVENT_MONITORS(EE_MONITOR_GARBAGE_COLLECTIONS, NotifyEvent(EE_EVENT_TYPE_GC_STARTED, 0));
35820 #ifdef COUNT_CYCLES
35823 start = GetCycleCount32();
35828 #endif //COUNT_CYCLES
35829 PromotedObjectCount = 0;
35832 unsigned int condemned_generation_number = gen;
35834 // We want to capture the stack of the user thread that triggered the GC,
35835 // not the GC thread's stack (which is what we would otherwise get for Server GC);
35836 // we do the same for Workstation GC to be uniform.
35837 FIRE_EVENT(GCTriggered, static_cast<uint32_t>(reason));
35839 #ifdef MULTIPLE_HEAPS
35840 GcCondemnedGeneration = condemned_generation_number;
35842 cooperative_mode = gc_heap::enable_preemptive ();
35844 BEGIN_TIMING(gc_during_log);
35845 gc_heap::ee_suspend_event.Set();
35846 gc_heap::wait_for_gc_done();
35847 END_TIMING(gc_during_log);
35849 gc_heap::disable_preemptive (cooperative_mode);
35851 condemned_generation_number = GcCondemnedGeneration;
35853 if (gc_heap::proceed_with_gc_p)
35855 BEGIN_TIMING(gc_during_log);
35856 pGenGCHeap->garbage_collect (condemned_generation_number);
35857 if (gc_heap::pm_trigger_full_gc)
35859 pGenGCHeap->garbage_collect_pm_full_gc();
35861 END_TIMING(gc_during_log);
35863 #endif //MULTIPLE_HEAPS
35866 #ifdef COUNT_CYCLES
35867 finish = GetCycleCount32();
35870 #endif //COUNT_CYCLES
35871 GcDuration += finish - start;
35873 ("<GC# %d> Condemned: %d, Duration: %d, total: %d Alloc Avg: %d, Small Objects:%d Large Objects:%d",
35874 VolatileLoad(&pGenGCHeap->settings.gc_index), condemned_generation_number,
35875 finish - start, GcDuration,
35876 AllocCount ? (AllocDuration / AllocCount) : 0,
35877 AllocSmallCount, AllocBigCount));
35882 #ifdef BACKGROUND_GC
35883 // We are deciding whether we should fire the alloc wait end event here
35884 // because in begin_foreground we could be calling end_foreground
35885 // if we need to retry.
35886 if (gc_heap::alloc_wait_event_p)
35888 hpt->fire_alloc_wait_event_end (awr_fgc_wait_for_bgc);
35889 gc_heap::alloc_wait_event_p = FALSE;
35891 #endif //BACKGROUND_GC
35893 #ifndef MULTIPLE_HEAPS
35894 #ifdef BACKGROUND_GC
35895 if (!gc_heap::dont_restart_ee_p)
35897 #endif //BACKGROUND_GC
35898 BEGIN_TIMING(restart_ee_during_log);
35899 GCToEEInterface::RestartEE(TRUE);
35900 END_TIMING(restart_ee_during_log);
35901 #ifdef BACKGROUND_GC
35903 #endif //BACKGROUND_GC
35904 #endif //!MULTIPLE_HEAPS
35906 #ifdef COUNT_CYCLES
35907 printf ("GC: %d Time: %d\n", GcCondemnedGeneration,
35908 GetCycleCount32() - gc_start);
35909 #endif //COUNT_CYCLES
35911 #ifndef MULTIPLE_HEAPS
35912 process_sync_log_stats();
35913 gc_heap::gc_started = FALSE;
35914 gc_heap::set_gc_done();
35915 dprintf (SPINLOCK_LOG, ("GC Lgc"));
35916 leave_spin_lock (&gc_heap::gc_lock);
35917 #endif //!MULTIPLE_HEAPS
35919 #ifdef FEATURE_PREMORTEM_FINALIZATION
35920 GCToEEInterface::EnableFinalization(!pGenGCHeap->settings.concurrent && pGenGCHeap->settings.found_finalizers);
35921 #endif // FEATURE_PREMORTEM_FINALIZATION
35923 return dd_collection_count (dd);
35926 size_t GCHeap::GetTotalBytesInUse ()
35928 #ifdef MULTIPLE_HEAPS
35929 //enumerate all the heaps and get their size.
35930 size_t tot_size = 0;
35931 for (int i = 0; i < gc_heap::n_heaps; i++)
35933 GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap;
35934 tot_size += Hp->ApproxTotalBytesInUse (FALSE);
35938 return ApproxTotalBytesInUse ();
35939 #endif //MULTIPLE_HEAPS
35942 int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count)
35944 if (get_bgc_fgc_count != 0)
35946 #ifdef BACKGROUND_GC
35947 if (generation == max_generation)
35949 return (int)(gc_heap::full_gc_counts[gc_type_background]);
35953 return (int)(gc_heap::ephemeral_fgc_counts[generation]);
35957 #endif //BACKGROUND_GC
35960 #ifdef MULTIPLE_HEAPS
35961 gc_heap* hp = gc_heap::g_heaps [0];
35962 #else //MULTIPLE_HEAPS
35963 gc_heap* hp = pGenGCHeap;
35964 #endif //MULTIPLE_HEAPS
35965 if (generation > max_generation)
35968 return (int)dd_collection_count (hp->dynamic_data_of (generation));
35971 size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
35973 size_t totsize = 0;
35975 //ASSERT(InMustComplete());
35976 enter_spin_lock (&pGenGCHeap->gc_lock);
35978 heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
35979 // Get small block heap size info
35980 totsize = (pGenGCHeap->alloc_allocated - heap_segment_mem (eph_seg));
35981 heap_segment* seg1 = generation_start_segment (pGenGCHeap->generation_of (max_generation));
35982 while (seg1 != eph_seg)
35984 totsize += heap_segment_allocated (seg1) -
35985 heap_segment_mem (seg1);
35986 seg1 = heap_segment_next (seg1);
35989 //discount the fragmentation
35990 for (int i = 0; i <= max_generation; i++)
35992 generation* gen = pGenGCHeap->generation_of (i);
35993 totsize -= (generation_free_list_space (gen) + generation_free_obj_space (gen));
35996 if (!small_heap_only)
35998 heap_segment* seg2 = generation_start_segment (pGenGCHeap->generation_of (max_generation+1));
36002 totsize += heap_segment_allocated (seg2) -
36003 heap_segment_mem (seg2);
36004 seg2 = heap_segment_next (seg2);
36007 //discount the fragmentation
36008 generation* loh_gen = pGenGCHeap->generation_of (max_generation+1);
36009 size_t frag = generation_free_list_space (loh_gen) + generation_free_obj_space (loh_gen);
36012 leave_spin_lock (&pGenGCHeap->gc_lock);
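// Net effect of the computation above (descriptive summary):
//   totsize =   (alloc_allocated - start of the ephemeral segment)
//             + sum over the other SOH segments of (allocated - mem)
//             - sum over gen0..gen2 of (free list space + free obj space)
//             and, when !small_heap_only, the same allocated-minus-fragmentation
//             term for the large object heap.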
36016 #ifdef MULTIPLE_HEAPS
36017 void GCHeap::AssignHeap (alloc_context* acontext)
36019 // Assign heap based on processor
36020 acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext, 0)));
36021 acontext->set_home_heap(acontext->get_alloc_heap());
36023 GCHeap* GCHeap::GetHeap (int n)
36025 assert (n < gc_heap::n_heaps);
36026 return gc_heap::g_heaps [n]->vm_heap;
36028 #endif //MULTIPLE_HEAPS
36030 bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number)
36032 alloc_context* acontext = static_cast<alloc_context*>(context);
36033 #ifdef MULTIPLE_HEAPS
36034 return ((acontext->get_home_heap() == GetHeap(thread_number)) ||
36035 ((acontext->get_home_heap() == 0) && (thread_number == 0)));
36037 UNREFERENCED_PARAMETER(acontext);
36038 UNREFERENCED_PARAMETER(thread_number);
36040 #endif //MULTIPLE_HEAPS
36043 // Returns the number of GC heaps (1 unless Server GC is using multiple heaps).
36044 int GCHeap::GetNumberOfHeaps ()
36046 #ifdef MULTIPLE_HEAPS
36047 return gc_heap::n_heaps;
36050 #endif //MULTIPLE_HEAPS
36054 in this way we spend extra time cycling through all the heaps while creating the handle;
36055 it ought to be changed to keep alloc_context.home_heap as a number (equal to heap_number)
36057 int GCHeap::GetHomeHeapNumber ()
36059 #ifdef MULTIPLE_HEAPS
36060 gc_alloc_context* ctx = GCToEEInterface::GetAllocContext();
36066 GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap();
36067 return (hp ? hp->pGenGCHeap->heap_number : 0);
36070 #endif //MULTIPLE_HEAPS
36073 unsigned int GCHeap::GetCondemnedGeneration()
36075 return gc_heap::settings.condemned_generation;
36078 void GCHeap::GetMemoryInfo(uint32_t* highMemLoadThreshold,
36079 uint64_t* totalPhysicalMem,
36080 uint32_t* lastRecordedMemLoad,
36081 size_t* lastRecordedHeapSize,
36082 size_t* lastRecordedFragmentation)
36084 *highMemLoadThreshold = gc_heap::high_memory_load_th;
36085 *totalPhysicalMem = gc_heap::total_physical_mem;
36086 *lastRecordedMemLoad = gc_heap::last_gc_memory_load;
36087 *lastRecordedHeapSize = gc_heap::last_gc_heap_size;
36088 *lastRecordedFragmentation = gc_heap::last_gc_fragmentation;
36091 int GCHeap::GetGcLatencyMode()
36093 return (int)(pGenGCHeap->settings.pause_mode);
36096 int GCHeap::SetGcLatencyMode (int newLatencyMode)
36098 if (gc_heap::settings.pause_mode == pause_no_gc)
36099 return (int)set_pause_mode_no_gc;
36101 gc_pause_mode new_mode = (gc_pause_mode)newLatencyMode;
36103 if (new_mode == pause_low_latency)
36105 #ifndef MULTIPLE_HEAPS
36106 pGenGCHeap->settings.pause_mode = new_mode;
36107 #endif //!MULTIPLE_HEAPS
36109 else if (new_mode == pause_sustained_low_latency)
36111 #ifdef BACKGROUND_GC
36112 if (gc_heap::gc_can_use_concurrent)
36114 pGenGCHeap->settings.pause_mode = new_mode;
36116 #endif //BACKGROUND_GC
36120 pGenGCHeap->settings.pause_mode = new_mode;
36123 #ifdef BACKGROUND_GC
36124 if (recursive_gc_sync::background_running_p())
36126 // If we get here, it means we are doing an FGC. If the pause
36127 // mode was altered we will need to save it in the BGC settings.
36128 if (gc_heap::saved_bgc_settings.pause_mode != new_mode)
36130 gc_heap::saved_bgc_settings.pause_mode = new_mode;
36133 #endif //BACKGROUND_GC
36135 return (int)set_pause_mode_success;
36138 int GCHeap::GetLOHCompactionMode()
36140 return pGenGCHeap->loh_compaction_mode;
36143 void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode)
36145 #ifdef FEATURE_LOH_COMPACTION
36146 pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode;
36147 #endif //FEATURE_LOH_COMPACTION
36150 bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
36151 uint32_t lohPercentage)
36153 #ifdef MULTIPLE_HEAPS
36154 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36156 gc_heap* hp = gc_heap::g_heaps [hn];
36157 hp->fgn_last_alloc = dd_new_allocation (hp->dynamic_data_of (0));
36159 #else //MULTIPLE_HEAPS
36160 pGenGCHeap->fgn_last_alloc = dd_new_allocation (pGenGCHeap->dynamic_data_of (0));
36161 #endif //MULTIPLE_HEAPS
36163 pGenGCHeap->full_gc_approach_event.Reset();
36164 pGenGCHeap->full_gc_end_event.Reset();
36165 pGenGCHeap->full_gc_approach_event_set = false;
36167 pGenGCHeap->fgn_maxgen_percent = gen2Percentage;
36168 pGenGCHeap->fgn_loh_percent = lohPercentage;
36173 bool GCHeap::CancelFullGCNotification()
36175 pGenGCHeap->fgn_maxgen_percent = 0;
36176 pGenGCHeap->fgn_loh_percent = 0;
36178 pGenGCHeap->full_gc_approach_event.Set();
36179 pGenGCHeap->full_gc_end_event.Set();
36184 int GCHeap::WaitForFullGCApproach(int millisecondsTimeout)
36186 dprintf (2, ("WFGA: Begin wait"));
36187 int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_approach_event), millisecondsTimeout);
36188 dprintf (2, ("WFGA: End wait"));
36192 int GCHeap::WaitForFullGCComplete(int millisecondsTimeout)
36194 dprintf (2, ("WFGE: Begin wait"));
36195 int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_end_event), millisecondsTimeout);
36196 dprintf (2, ("WFGE: End wait"));
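// Illustrative consumer pattern for the notification API above (this is the path the
// managed GC.RegisterForFullGCNotification surface reaches; return-code handling is
// sketched, not exhaustive):
//
//   if (pGCHeap->RegisterForFullGCNotification (10, 10)) // 10% gen2/LOH thresholds
//   {
//       while (pGCHeap->WaitForFullGCApproach (timeout) == WAIT_OBJECT_0)
//       {
//           // shed load / pause routing work to this instance, then wait for the GC:
//           pGCHeap->WaitForFullGCComplete (timeout);
//       }
//   }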
36200 int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC)
36202 NoGCRegionLockHolder lh;
36204 dprintf (1, ("begin no gc called"));
36205 start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC);
36206 if (status == start_no_gc_success)
36208 GarbageCollect (max_generation);
36209 status = gc_heap::get_start_no_gc_region_status();
36212 if (status != start_no_gc_success)
36213 gc_heap::handle_failure_for_no_gc();
36215 return (int)status;
36218 int GCHeap::EndNoGCRegion()
36220 NoGCRegionLockHolder lh;
36221 return (int)gc_heap::end_no_gc_region();
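// Illustrative call sequence for the no-GC-region API above (this is what the managed
// GC.TryStartNoGCRegion / GC.EndNoGCRegion calls funnel into):
//
//   if (pGCHeap->StartNoGCRegion (64*1024*1024, false, 0, false) == (int)start_no_gc_success)
//   {
//       // ... allocations up to the requested budget proceed without any GC ...
//       pGCHeap->EndNoGCRegion ();
//   }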
36224 void GCHeap::PublishObject (uint8_t* Obj)
36226 #ifdef BACKGROUND_GC
36227 gc_heap* hp = gc_heap::heap_of (Obj);
36228 hp->bgc_alloc_lock->loh_alloc_done (Obj);
36229 hp->bgc_untrack_loh_alloc();
36230 #endif //BACKGROUND_GC
36233 // The spec for this one isn't clear. This function
36234 // returns the size that can be allocated without
36235 // triggering a GC of any kind.
36236 size_t GCHeap::ApproxFreeBytes()
36239 //ASSERT(InMustComplete());
36240 enter_spin_lock (&pGenGCHeap->gc_lock);
36242 generation* gen = pGenGCHeap->generation_of (0);
36243 size_t res = generation_allocation_limit (gen) - generation_allocation_pointer (gen);
36245 leave_spin_lock (&pGenGCHeap->gc_lock);
36250 HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
36252 if ((gen < 0) || (gen > max_generation))
36254 #ifdef MULTIPLE_HEAPS
36255 counters->current_size = 0;
36256 counters->promoted_size = 0;
36257 counters->collection_count = 0;
36259 //enumerate all the heaps and get their counters.
36260 for (int i = 0; i < gc_heap::n_heaps; i++)
36262 dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen);
36264 counters->current_size += dd_current_size (dd);
36265 counters->promoted_size += dd_promoted_size (dd);
36267 counters->collection_count += dd_collection_count (dd);
36270 dynamic_data* dd = pGenGCHeap->dynamic_data_of (gen);
36271 counters->current_size = dd_current_size (dd);
36272 counters->promoted_size = dd_promoted_size (dd);
36273 counters->collection_count = dd_collection_count (dd);
36274 #endif //MULTIPLE_HEAPS
36278 // Get the segment size to use, making sure it conforms.
36279 size_t GCHeap::GetValidSegmentSize(bool large_seg)
36281 return (large_seg ? gc_heap::min_loh_segment_size : gc_heap::soh_segment_size);
36284 // Get the gen0 size to use, making sure it conforms.
36285 size_t gc_heap::get_gen0_min_size()
36287 size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
36288 bool is_config_invalid = ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size));
36289 if (is_config_invalid)
36292 // performance data seems to indicate halving the size results
36293 // in optimal perf. Ask for adjusted gen0 size.
36294 gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024));
36296 // if gen0 size is too large given the available memory, reduce it.
36297 // Get true cache size, as we don't want to reduce below this.
36298 size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024));
36299 dprintf (1, ("cache: %Id-%Id",
36300 GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),
36301 GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)));
36303 int n_heaps = gc_heap::n_heaps;
36305 size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE);
36306 gen0size = max((4*trueSize/5),(256*1024));
36307 trueSize = max(trueSize, (256*1024));
36311 dprintf (1, ("gen0size: %Id * %d = %Id, physical mem: %Id / 6 = %Id",
36312 gen0size, n_heaps, (gen0size * n_heaps),
36313 gc_heap::total_physical_mem,
36314 gc_heap::total_physical_mem / 6));
36316 // if the total min GC across heaps will exceed 1/6th of available memory,
36317 // then reduce the min GC size until it either fits or has been reduced to cache size.
36318 while ((gen0size * n_heaps) > (gc_heap::total_physical_mem / 6))
36320 gen0size = gen0size / 2;
36321 if (gen0size <= trueSize)
36323 gen0size = trueSize;
36329 size_t seg_size = gc_heap::soh_segment_size;
36332 // Generation 0 must never be more than 1/2 the segment size.
36333 if (gen0size >= (seg_size / 2))
36334 gen0size = seg_size / 2;
36336 // If the value from config is valid we use it as is without this adjustment.
36337 if (is_config_invalid)
36339 if (heap_hard_limit)
36341 size_t gen0size_seg = seg_size / 8;
36342 if (gen0size >= gen0size_seg)
36344 dprintf (1, ("gen0 limited by seg size %Id->%Id", gen0size, gen0size_seg));
36345 gen0size = gen0size_seg;
36349 gen0size = gen0size / 8 * 5;
36352 gen0size = Align (gen0size);
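// Worked example (illustrative, using the clamps above): a cache-derived gen0size of
// 32MB with no hard limit becomes 32MB / 8 * 5 = 20MB (then aligned); with
// heap_hard_limit and a 16MB SOH segment it is clamped to seg_size / 8 = 2MB instead.
// A valid gen0 size supplied via config (GCConfig::GetGen0Size) skips these
// adjustments; only the half-segment cap still applies.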
36357 void GCHeap::SetReservedVMLimit (size_t vmlimit)
36359 gc_heap::reserved_memory_limit = vmlimit;
36362 //versions of same method on each heap
36364 #ifdef FEATURE_PREMORTEM_FINALIZATION
36366 Object* GCHeap::GetNextFinalizableObject()
36369 #ifdef MULTIPLE_HEAPS
36371 //return the first non-critical one in the first queue.
36372 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36374 gc_heap* hp = gc_heap::g_heaps [hn];
36375 Object* O = hp->finalize_queue->GetNextFinalizableObject(TRUE);
36379 //return the first critical or non-critical one in the first queue.
36380 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36382 gc_heap* hp = gc_heap::g_heaps [hn];
36383 Object* O = hp->finalize_queue->GetNextFinalizableObject(FALSE);
36390 #else //MULTIPLE_HEAPS
36391 return pGenGCHeap->finalize_queue->GetNextFinalizableObject();
36392 #endif //MULTIPLE_HEAPS
36396 size_t GCHeap::GetNumberFinalizableObjects()
36398 #ifdef MULTIPLE_HEAPS
36400 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36402 gc_heap* hp = gc_heap::g_heaps [hn];
36403 cnt += hp->finalize_queue->GetNumberFinalizableObjects();
36408 #else //MULTIPLE_HEAPS
36409 return pGenGCHeap->finalize_queue->GetNumberFinalizableObjects();
36410 #endif //MULTIPLE_HEAPS
36413 size_t GCHeap::GetFinalizablePromotedCount()
36415 #ifdef MULTIPLE_HEAPS
36418 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36420 gc_heap* hp = gc_heap::g_heaps [hn];
36421 cnt += hp->finalize_queue->GetPromotedCount();
36425 #else //MULTIPLE_HEAPS
36426 return pGenGCHeap->finalize_queue->GetPromotedCount();
36427 #endif //MULTIPLE_HEAPS
36430 bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
36432 #ifdef MULTIPLE_HEAPS
36433 bool foundp = false;
36434 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36436 gc_heap* hp = gc_heap::g_heaps [hn];
36437 if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers))
36442 #else //MULTIPLE_HEAPS
36443 return pGenGCHeap->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers);
36444 #endif //MULTIPLE_HEAPS
36447 bool GCHeap::ShouldRestartFinalizerWatchDog()
36449 // This condition was historically part of the check used to detect finalizer thread timeouts.
36450 return gc_heap::gc_lock.lock != -1;
36453 void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock)
36455 #ifdef MULTIPLE_HEAPS
36456 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
36458 gc_heap* hp = gc_heap::g_heaps [hn];
36459 hp->finalize_queue->SetSegForShutDown(fHasLock);
36462 #else //MULTIPLE_HEAPS
36463 pGenGCHeap->finalize_queue->SetSegForShutDown(fHasLock);
36464 #endif //MULTIPLE_HEAPS
36467 //---------------------------------------------------------------------------
36468 // Finalized class tracking
36469 //---------------------------------------------------------------------------
36471 bool GCHeap::RegisterForFinalization (int gen, Object* obj)
36475 if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
36477 //just reset the bit
36478 ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN);
36483 gc_heap* hp = gc_heap::heap_of ((uint8_t*)obj);
36484 return hp->finalize_queue->RegisterForFinalization (gen, obj);
36488 void GCHeap::SetFinalizationRun (Object* obj)
36490 ((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN);
36494 //--------------------------------------------------------------------
36496 // Support for finalization
36498 //--------------------------------------------------------------------
36501 unsigned int gen_segment (int gen)
36503 assert (((signed)NUMBERGENERATIONS - gen - 1)>=0);
36504 return (NUMBERGENERATIONS - gen - 1);
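// Mapping example (assuming NUMBERGENERATIONS is 4 here): gen_segment(3) == 0,
// gen_segment(2) == 1, gen_segment(1) == 2, gen_segment(0) == 3 -- the finalization
// array keeps the oldest generation's segment first and gen0's last, followed by the
// CriticalFinalizerListSeg, FinalizerListSeg and FreeList segments.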
36507 bool CFinalize::Initialize()
36514 m_Array = new (nothrow)(Object*[100]);
36519 STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
36520 if (GCConfig::GetBreakOnOOM())
36522 GCToOSInterface::DebugBreak();
36526 m_EndArray = &m_Array[100];
36528 for (int i =0; i < FreeList; i++)
36530 SegQueueLimit (i) = m_Array;
36532 m_PromotedCount = 0;
36535 lockowner_threadid.Clear();
36541 CFinalize::~CFinalize()
36546 size_t CFinalize::GetPromotedCount ()
36548 return m_PromotedCount;
36552 void CFinalize::EnterFinalizeLock()
36554 _ASSERTE(dbgOnly_IsSpecialEEThread() ||
36555 GCToEEInterface::GetThread() == 0 ||
36556 GCToEEInterface::IsPreemptiveGCDisabled());
36559 if (Interlocked::CompareExchange(&lock, 0, -1) >= 0)
36561 unsigned int i = 0;
36564 YieldProcessor(); // indicate to the processor that we are spinning
36566 GCToOSInterface::YieldThread (0);
36568 GCToOSInterface::Sleep (5);
36574 lockowner_threadid.SetToCurrentThread();
36579 void CFinalize::LeaveFinalizeLock()
36581 _ASSERTE(dbgOnly_IsSpecialEEThread() ||
36582 GCToEEInterface::GetThread() == 0 ||
36583 GCToEEInterface::IsPreemptiveGCDisabled());
36586 lockowner_threadid.Clear();
36592 CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
36599 EnterFinalizeLock();
36601 unsigned int dest = 0;
36603 if (g_fFinalizerRunOnShutDown)
36605 //no method table available yet,
36606 //put it in the finalizer queue and sort it out when the queue is processed
36608 dest = FinalizerListSeg;
36612 dest = gen_segment (gen);
36614 // Adjust boundary for segments so that GC will keep objects alive.
36615 Object*** s_i = &SegQueue (FreeList);
36616 if ((*s_i) == m_EndArray)
36620 LeaveFinalizeLock();
36621 if (method_table(obj) == NULL)
36623 // If the object is uninitialized, a valid size should have been passed.
36624 assert (size >= Align (min_obj_size));
36625 dprintf (3, ("Making unused array [%Ix, %Ix[", (size_t)obj, (size_t)(obj+size)));
36626 ((CObjectHeader*)obj)->SetFree(size);
36628 STRESS_LOG_OOM_STACK(0);
36629 if (GCConfig::GetBreakOnOOM())
36631 GCToOSInterface::DebugBreak();
36636 Object*** end_si = &SegQueueLimit (dest);
36639 //is the segment empty?
36640 if (!(*s_i == *(s_i-1)))
36642 //no, swap the end elements.
36643 *(*s_i) = *(*(s_i-1));
36645 //increment the fill pointer
36647 //go to the next segment.
36649 } while (s_i > end_si);
36651 // We have reached the destination segment
36652 // store the object
36654 // increment the fill pointer
36657 LeaveFinalizeLock();
36663 CFinalize::GetNextFinalizableObject (BOOL only_non_critical)
36667 EnterFinalizeLock();
36670 if (!IsSegEmpty(FinalizerListSeg))
36672 if (g_fFinalizerRunOnShutDown)
36674 obj = *(SegQueueLimit (FinalizerListSeg)-1);
36675 if (method_table(obj)->HasCriticalFinalizer())
36677 MoveItem ((SegQueueLimit (FinalizerListSeg)-1),
36678 FinalizerListSeg, CriticalFinalizerListSeg);
36682 --SegQueueLimit (FinalizerListSeg);
36685 obj = *(--SegQueueLimit (FinalizerListSeg));
36688 else if (!only_non_critical && !IsSegEmpty(CriticalFinalizerListSeg))
36690 //the FinalizerList is empty, so we can adjust both
36691 // limits instead of moving the object to the free list
36692 obj = *(--SegQueueLimit (CriticalFinalizerListSeg));
36693 --SegQueueLimit (FinalizerListSeg);
36697 dprintf (3, ("running finalizer for %Ix (mt: %Ix)", obj, method_table (obj)));
36699 LeaveFinalizeLock();
36704 CFinalize::SetSegForShutDown(BOOL fHasLock)
36709 EnterFinalizeLock();
36710 for (i = 0; i <= max_generation; i++)
36712 unsigned int seg = gen_segment (i);
36713 Object** startIndex = SegQueueLimit (seg)-1;
36714 Object** stopIndex = SegQueue (seg);
36715 for (Object** po = startIndex; po >= stopIndex; po--)
36718 if (method_table(obj)->HasCriticalFinalizer())
36720 MoveItem (po, seg, CriticalFinalizerListSeg);
36724 MoveItem (po, seg, FinalizerListSeg);
36729 LeaveFinalizeLock();
36733 CFinalize::DiscardNonCriticalObjects()
36735 //empty the finalization queue
36736 Object** startIndex = SegQueueLimit (FinalizerListSeg)-1;
36737 Object** stopIndex = SegQueue (FinalizerListSeg);
36738 for (Object** po = startIndex; po >= stopIndex; po--)
36740 MoveItem (po, FinalizerListSeg, FreeList);
36745 CFinalize::GetNumberFinalizableObjects()
36747 return SegQueueLimit (FinalizerListSeg) -
36748 (g_fFinalizerRunOnShutDown ? m_Array : SegQueue(FinalizerListSeg));
36752 CFinalize::FinalizeSegForAppDomain (void *pDomain,
36753 BOOL fRunFinalizers,
36756 BOOL finalizedFound = FALSE;
36757 Object** endIndex = SegQueue (Seg);
36758 for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
36760 CObjectHeader* obj = (CObjectHeader*)*i;
36762 // Objects are put into the finalization queue before they are complete (ie their methodtable
36763 // may be null) so we must check that the object we found has a method table before checking
36764 // if it has the index we are looking for. If the methodtable is null, it can't be from the
36765 // unloading domain, so skip it.
36766 if (method_table(obj) == NULL)
36771 // does the EE actually want us to finalize this object?
36772 if (!GCToEEInterface::ShouldFinalizeObjectForUnload(pDomain, obj))
36777 if (!fRunFinalizers || (obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
36779 //remove the object because we don't want to
36780 //run the finalizer
36781 MoveItem (i, Seg, FreeList);
36782 //Reset the bit so it will be put back on the queue
36783 //if resurrected and re-registered.
36784 obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
36788 if (method_table(obj)->HasCriticalFinalizer())
36790 finalizedFound = TRUE;
36791 MoveItem (i, Seg, CriticalFinalizerListSeg);
36795 if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
36797 MoveItem (i, Seg, FreeList);
36801 finalizedFound = TRUE;
36802 MoveItem (i, Seg, FinalizerListSeg);
36808 return finalizedFound;
36812 CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
36814 bool finalizedFound = false;
36816 unsigned int startSeg = gen_segment (max_generation);
36818 EnterFinalizeLock();
36820 for (unsigned int Seg = startSeg; Seg <= gen_segment (0); Seg++)
36822 if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg))
36824 finalizedFound = true;
36828 LeaveFinalizeLock();
36830 return finalizedFound;
36834 CFinalize::MoveItem (Object** fromIndex,
36835 unsigned int fromSeg,
36836 unsigned int toSeg)
36840 ASSERT (fromSeg != toSeg);
36841 if (fromSeg > toSeg)
36845 // Place the element at the boundary closest to dest
36846 Object** srcIndex = fromIndex;
36847 for (unsigned int i = fromSeg; i != toSeg; i+= step)
36849 Object**& destFill = m_FillPointers[i+(step - 1 )/2];
36850 Object** destIndex = destFill - (step + 1)/2;
36851 if (srcIndex != destIndex)
36853 Object* tmp = *srcIndex;
36854 *srcIndex = *destIndex;
36858 srcIndex = destIndex;
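// Illustration of the boundary-swap trick in MoveItem: moving X from seg3 to seg1 in
// | A | B C | D X | first swaps X with D and shifts the seg2/seg3 boundary
// ( | A | B C X | D | ), then swaps X with B and shifts the seg1/seg2 boundary
// ( | A X | C B | D | ). That is one swap per crossed boundary instead of shifting
// every element, at the cost of not preserving order within segments.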
36863 CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
36869 pSC->thread_number = hn;
36871 //scan the finalization queue
36872 Object** startIndex = SegQueue (CriticalFinalizerListSeg);
36873 Object** stopIndex = SegQueueLimit (FinalizerListSeg);
36875 for (Object** po = startIndex; po < stopIndex; po++)
36878 //dprintf (3, ("scan freacheable %Ix", (size_t)o));
36879 dprintf (3, ("scan f %Ix", (size_t)o));
36880 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
36881 if (g_fEnableAppDomainMonitoring)
36883 pSC->pCurrentDomain = GCToEEInterface::GetAppDomainForObject(o);
36885 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
36891 void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
36893 Object** startIndex = SegQueue (CriticalFinalizerListSeg);
36894 Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
36895 Object** stopIndex = SegQueueLimit (FinalizerListSeg);
36896 for (Object** po = startIndex; po < stopIndex; po++)
36899 fn(po < stopCriticalIndex, *po);
36904 CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
36908 sc.promotion = TRUE;
36909 #ifdef MULTIPLE_HEAPS
36910 sc.thread_number = hp->heap_number;
36912 UNREFERENCED_PARAMETER(hp);
36913 #endif //MULTIPLE_HEAPS
36915 BOOL finalizedFound = FALSE;
36917 //start with gen and explore all the younger generations.
36918 unsigned int startSeg = gen_segment (gen);
36920 m_PromotedCount = 0;
36921 for (unsigned int Seg = startSeg; Seg <= gen_segment(0); Seg++)
36923 Object** endIndex = SegQueue (Seg);
36924 for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
36926 CObjectHeader* obj = (CObjectHeader*)*i;
36927 dprintf (3, ("scanning: %Ix", (size_t)obj));
36928 if (!g_theGCHeap->IsPromoted (obj))
36930 dprintf (3, ("freacheable: %Ix", (size_t)obj));
36932 assert (method_table(obj)->HasFinalizer());
36934 if (GCToEEInterface::EagerFinalized(obj))
36936 MoveItem (i, Seg, FreeList);
36938 else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
36940 //remove the object because we don't want to
36941 //run the finalizer
36943 MoveItem (i, Seg, FreeList);
36945 //Reset the bit so it will be put back on the queue
36946 //if resurrected and re-registered.
36947 obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
36954 if (method_table(obj)->HasCriticalFinalizer())
36956 MoveItem (i, Seg, CriticalFinalizerListSeg);
36960 MoveItem (i, Seg, FinalizerListSeg);
36964 #ifdef BACKGROUND_GC
36967 if ((gen == max_generation) && (recursive_gc_sync::background_running_p()))
36969 // TODO - fix the following line.
36970 //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE));
36971 dprintf (3, ("%Ix is marked", (size_t)obj));
36974 #endif //BACKGROUND_GC
36978 finalizedFound = !IsSegEmpty(FinalizerListSeg) ||
36979 !IsSegEmpty(CriticalFinalizerListSeg);
36981 if (finalizedFound)
36983 //Promote the f-reachable objects
36985 #ifdef MULTIPLE_HEAPS
36989 #endif //MULTIPLE_HEAPS
36992 hp->settings.found_finalizers = TRUE;
36994 #ifdef BACKGROUND_GC
36995 if (hp->settings.concurrent)
36997 hp->settings.found_finalizers = !(IsSegEmpty(FinalizerListSeg) && IsSegEmpty(CriticalFinalizerListSeg));
36999 #endif //BACKGROUND_GC
37000 if (hp->settings.concurrent && hp->settings.found_finalizers)
37003 GCToEEInterface::EnableFinalization(true);
37007 return finalizedFound;
37010 //Relocates all of the objects in the finalization array
37012 CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
37015 sc.promotion = FALSE;
37016 #ifdef MULTIPLE_HEAPS
37017 sc.thread_number = hp->heap_number;
37019 UNREFERENCED_PARAMETER(hp);
37020 #endif //MULTIPLE_HEAPS
37022 unsigned int Seg = gen_segment (gen);
37024 Object** startIndex = SegQueue (Seg);
37025 for (Object** po = startIndex; po < SegQueue (FreeList);po++)
37027 GCHeap::Relocate (po, &sc);
37032 CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
37034 // update the generation fill pointers.
37035 // if gen_0_empty is FALSE, test each object to find out if
37036 // it was promoted or not
37039 for (int i = min (gen+1, max_generation); i > 0; i--)
37041 m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)];
37046 //Look for demoted or promoted plugs
37048 for (int i = gen; i >= 0; i--)
37050 unsigned int Seg = gen_segment (i);
37051 Object** startIndex = SegQueue (Seg);
37053 for (Object** po = startIndex;
37054 po < SegQueueLimit (gen_segment(i)); po++)
37056 int new_gen = g_theGCHeap->WhichGeneration (*po);
37062 MoveItem (po, gen_segment (i), gen_segment (new_gen));
37067 MoveItem (po, gen_segment (i), gen_segment (new_gen));
37068 //step back so the object just swapped into this slot is examined too.
37079 CFinalize::GrowArray()
37081 size_t oldArraySize = (m_EndArray - m_Array);
37082 size_t newArraySize = (size_t)(((float)oldArraySize / 10) * 12);
37084 Object** newArray = new (nothrow) Object*[newArraySize];
37087 // It's not safe to throw here, because of the FinalizeLock. Tell our caller
37088 // to throw for us.
37089 // ASSERT (newArray);
37092 memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));
37094 //adjust the fill pointers
37095 for (int i = 0; i < FreeList; i++)
37097 m_FillPointers [i] += (newArray - m_Array);
37100 m_Array = newArray;
37101 m_EndArray = &m_Array [newArraySize];
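// Growth example: a 100-slot array becomes (100 / 10) * 12 = 120 slots (a 1.2x growth
// factor), and each fill pointer is rebased by (newArray - m_Array) so every segment
// boundary still refers to the same logical slot in the new allocation.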
37107 void CFinalize::CheckFinalizerObjects()
37109 for (int i = 0; i <= max_generation; i++)
37111 Object **startIndex = SegQueue (gen_segment (i));
37112 Object **stopIndex = SegQueueLimit (gen_segment (i));
37114 for (Object **po = startIndex; po < stopIndex; po++)
37116 if ((int)g_theGCHeap->WhichGeneration (*po) < i)
37118 ((CObjectHeader*)*po)->Validate();
37122 #endif //VERIFY_HEAP
37124 #endif // FEATURE_PREMORTEM_FINALIZATION
37127 //------------------------------------------------------------------------------
37129 // End of VM specific support
37131 //------------------------------------------------------------------------------
37132 void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
37134 generation* gen = gc_heap::generation_of (gen_number);
37135 heap_segment* seg = generation_start_segment (gen);
37136 uint8_t* x = ((gen_number == max_generation) ? heap_segment_mem (seg) :
37137 generation_allocation_start (gen));
37139 uint8_t* end = heap_segment_allocated (seg);
37140 BOOL small_object_segments = TRUE;
37141 int align_const = get_alignment_constant (small_object_segments);
37148 if ((seg = heap_segment_next (seg)) != 0)
37150 x = heap_segment_mem (seg);
37151 end = heap_segment_allocated (seg);
37156 if (small_object_segments && walk_large_object_heap_p)
37159 small_object_segments = FALSE;
37160 align_const = get_alignment_constant (small_object_segments);
37161 seg = generation_start_segment (large_object_generation);
37162 x = heap_segment_mem (seg);
37163 end = heap_segment_allocated (seg);
37173 size_t s = size (x);
37174 CObjectHeader* o = (CObjectHeader*)x;
37179 _ASSERTE(((size_t)o & 0x3) == 0); // Last two bits should never be set at this point
37181 if (!fn (o->GetObjectBase(), context))
37184 x = x + Align (s, align_const);
37188 void gc_heap::walk_finalize_queue (fq_walk_fn fn)
37190 #ifdef FEATURE_PREMORTEM_FINALIZATION
37191 finalize_queue->WalkFReachableObjects (fn);
37192 #endif //FEATURE_PREMORTEM_FINALIZATION
37195 void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
37197 #ifdef MULTIPLE_HEAPS
37198 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
37200 gc_heap* hp = gc_heap::g_heaps [hn];
37202 hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
37205 walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
37206 #endif //MULTIPLE_HEAPS
37209 void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
37211 uint8_t* o = (uint8_t*)obj;
37214 go_through_object_cl (method_table (o), o, size(o), oo,
37218 Object *oh = (Object*)*oo;
37219 if (!fn (oh, context))
37227 void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type)
37229 gc_heap* hp = (gc_heap*)gc_context;
37230 hp->walk_survivors (fn, diag_context, type);
37233 void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p)
37235 gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
37238 void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
37240 gc_heap* hp = (gc_heap*)gc_context;
37241 hp->walk_finalize_queue (fn);
37244 void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
37246 #ifdef MULTIPLE_HEAPS
37247 for (int hn = 0; hn < gc_heap::n_heaps; hn++)
37249 gc_heap* hp = gc_heap::g_heaps [hn];
37250 hp->finalize_queue->GcScanRoots(fn, hn, sc);
37253 pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
37254 #endif //MULTIPLE_HEAPS
37257 void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
37259 UNREFERENCED_PARAMETER(gen_number);
37260 GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
37263 void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
37265 UNREFERENCED_PARAMETER(gen_number);
37266 GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
37269 // Go through and touch (read) each page straddled by a memory block.
37270 void TouchPages(void * pStart, size_t cb)
37272 const uint32_t pagesize = OS_PAGE_SIZE;
37273 _ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
37276 VOLATILE(char)* pEnd = (VOLATILE(char)*)(cb + (char*)pStart);
37277 VOLATILE(char)* p = (VOLATILE(char)*)(((char*)pStart) - (((size_t)pStart) & (pagesize-1)));
37281 a = VolatileLoad(p);
37282 //printf("Touching page %lxh\n", (uint32_t)p);
37288 #if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
37289 // This code is designed to catch the failure to update the write barrier
37290 // The way it works is to copy the whole heap right after every GC. The write
37291 // barrier code has been modified so that it updates the shadow as well as the
37292 // real GC heap. Before doing the next GC, we walk the heap, looking for pointers
37293 // that were updated in the real heap, but not the shadow. A mismatch indicates
37294 // an error. The offending code can be found by breaking after the correct GC,
37295 // and then placing a data breakpoint on the Heap location that was updated without
37296 // going through the write barrier.
37298 // Called at process shutdown
37299 void deleteGCShadow()
37301 if (g_GCShadow != 0)
37302 GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow);
37307 // Called at startup and right after a GC, get a snapshot of the GC Heap
37308 void initGCShadow()
37310 if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK))
37313 size_t len = g_gc_highest_address - g_gc_lowest_address;
37314 if (len > (size_t)(g_GCShadowEnd - g_GCShadow))
37317 g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None);
37318 if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len))
37320 _ASSERTE(!"Not enough memory to run HeapVerify level 2");
37321 // If after the assert we decide to allow the program to continue
37322 // running we need to be in a state that will not trigger any
37323 // additional AVs while we fail to allocate a shadow segment, i.e.
37324 // ensure calls to updateGCShadow() and checkGCWriteBarrier() don't AV
37329 g_GCShadowEnd += len;
37332 // save the value of g_gc_lowest_address at this time. If this value changes before
37333 // the next call to checkGCWriteBarrier() it means we extended the heap (with a
37334 // large object segment most probably), and the whole shadow segment is inconsistent.
37335 g_shadow_lowest_address = g_gc_lowest_address;
37337 //****** Copy the whole GC heap ******
37339 // NOTE: This is the one situation where the combination of heap_segment_rw(gen_start_segment())
37340 // can produce a NULL result. This is because the initialization has not completed.
37342 generation* gen = gc_heap::generation_of (max_generation);
37343 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
37345 ptrdiff_t delta = g_GCShadow - g_gc_lowest_address;
37346 BOOL small_object_segments = TRUE;
37351 if (small_object_segments)
37353 small_object_segments = FALSE;
37354 seg = heap_segment_rw (generation_start_segment (gc_heap::generation_of (max_generation+1)));
37360 // Copy the segment
37361 uint8_t* start = heap_segment_mem(seg);
37362 uint8_t* end = heap_segment_allocated (seg);
37363 memcpy(start + delta, start, end - start);
37364 seg = heap_segment_next_rw (seg);
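// The shadow lives at a fixed delta from the real heap: a heap address p maps to its
// shadow slot at &g_GCShadow[p - g_gc_lowest_address], the same delta used for the
// copies above. testGCShadow below depends on exactly this mapping.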
37368 #define INVALIDGCVALUE (void*)((size_t)0xcccccccd)
37370 // test to see if 'ptr' was only updated via the write barrier.
37371 inline void testGCShadow(Object** ptr)
37373 Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)];
37374 if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
37377 // If you get this assertion, someone updated a GC pointer in the heap without
37378 // using the write barrier. To find out who, check the value of
37379 // dd_collection_count (dynamic_data_of (0)) and note the value of 'ptr'.
37380 // Rerun the app up to the point where that previous GC has just occurred,
37381 // then put a data breakpoint on the value of 'ptr' and check every write to it
37382 // between the two GCs. The last one is not using the write barrier.
37384 // If the memory of interest does not exist at system startup,
37385 // you need to set the data breakpoint right after the memory gets committed
37386 // Set a breakpoint at the end of grow_heap_segment, and put the value of 'ptr'
37387 // in the memory window. run until the memory gets mapped. Then you can set
37390 // Note: we've identified race conditions when updating the GC shadow.
37391 // Throughout the runtime, code will update an address in the GC heap, then erect the
37392 // write barrier, which calls updateGCShadow. With an app that pounds on one heap
37393 // location from multiple threads, you can hit this assert even though everyone involved
37394 // is using the write barrier properly. Thus, we detect the race and set this location to INVALIDGCVALUE.
37395 // TODO: the code in jithelp.asm doesn't call updateGCShadow, and hasn't been
37396 // TODO: fixed to detect the race. We've only seen this race from VolatileWritePtr,
37397 // TODO: so elect not to fix jithelp.asm at this time. It should be done if we start hitting
37398 // TODO: erroneous asserts in here.
37400 if (*shadow != INVALIDGCVALUE)
37402 #ifdef FEATURE_BASICFREEZE
37403 // Write barriers for stores of references to frozen objects may be optimized away.
37404 if (!gc_heap::frozen_object_p(*ptr))
37405 #endif // FEATURE_BASICFREEZE
37407 _ASSERTE(!"Pointer updated without using write barrier");
37413 printf("saw an INVALIDGCVALUE. (just to let you know)\n");
37419 void testGCShadowHelper (uint8_t* x)
37421 size_t s = size (x);
37422 if (contain_pointers (x))
37424 go_through_object_nostart (method_table(x), x, s, oo,
37425 { testGCShadow((Object**) oo); });
37429 // Walk the whole heap, looking for pointers that were not updated with the write barrier.
37430 void checkGCWriteBarrier()
37432 // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment
37433 // and the GC shadow segment did not track that change!
37434 if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address)
37436 // No shadow heap, or it is stale; nothing to check.
37441 generation* gen = gc_heap::generation_of (max_generation);
37442 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
37444 PREFIX_ASSUME(seg != NULL);
37448 uint8_t* x = heap_segment_mem(seg);
37449 while (x < heap_segment_allocated (seg))
37451 size_t s = size (x);
37452 testGCShadowHelper (x);
37455 seg = heap_segment_next_rw (seg);
37460 // go through large object heap
37461 int alignment = get_alignment_constant(FALSE);
37462 generation* gen = gc_heap::generation_of (max_generation+1);
37463 heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
37465 PREFIX_ASSUME(seg != NULL);
37469 uint8_t* x = heap_segment_mem(seg);
37470 while (x < heap_segment_allocated (seg))
37472 size_t s = size (x);
37473 testGCShadowHelper (x);
37474 x = x + Align (s, alignment);
37476 seg = heap_segment_next_rw (seg);
37480 #endif //WRITE_BARRIER_CHECK && !SERVER_GC
37482 #endif // !DACCESS_COMPILE
37484 #ifdef FEATURE_BASICFREEZE
37485 void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef)
37487 #ifdef DACCESS_COMPILE
37488 UNREFERENCED_PARAMETER(seg);
37489 UNREFERENCED_PARAMETER(pvContext);
37490 UNREFERENCED_PARAMETER(pfnMethodTable);
37491 UNREFERENCED_PARAMETER(pfnObjRef);
37493 uint8_t *o = heap_segment_mem(seg);
37495 // small heap alignment constant
37496 int alignment = get_alignment_constant(TRUE);
37498 while (o < heap_segment_allocated(seg))
37500 pfnMethodTable(pvContext, o);
37502 if (contain_pointers (o))
37504 go_through_object_nostart (method_table (o), o, size(o), oo,
37507 pfnObjRef(pvContext, oo);
37512 o += Align(size(o), alignment);
37514 #endif //!DACCESS_COMPILE
37516 #endif // FEATURE_BASICFREEZE
37518 #ifndef DACCESS_COMPILE
37519 HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
37521 #ifdef BACKGROUND_GC
37522 if (recursive_gc_sync::background_running_p())
37524 uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout);
37525 if (dwRet == WAIT_OBJECT_0)
37527 else if (dwRet == WAIT_TIMEOUT)
37528 return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
37530 return E_FAIL; // It is not clear what the last error would be if the wait failed,
37531 // as there are too many layers in between. The best we can do is to return E_FAIL.
37537 #endif // !DACCESS_COMPILE
37539 void GCHeap::TemporaryEnableConcurrentGC()
37541 #ifdef BACKGROUND_GC
37542 gc_heap::temp_disable_concurrent_p = false;
37543 #endif //BACKGROUND_GC
37546 void GCHeap::TemporaryDisableConcurrentGC()
37548 #ifdef BACKGROUND_GC
37549 gc_heap::temp_disable_concurrent_p = true;
37550 #endif //BACKGROUND_GC
37553 bool GCHeap::IsConcurrentGCEnabled()
37555 #ifdef BACKGROUND_GC
37556 return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
37559 #endif //BACKGROUND_GC
37562 void GCHeap::SetFinalizeRunOnShutdown(bool value)
37564 g_fFinalizerRunOnShutDown = value;
37567 void PopulateDacVars(GcDacVars *gcDacVars)
37569 #ifndef DACCESS_COMPILE
37570 assert(gcDacVars != nullptr);
37572 gcDacVars->major_version_number = 1;
37573 gcDacVars->minor_version_number = 0;
37574 gcDacVars->built_with_svr = &g_built_with_svr_gc;
37575 gcDacVars->build_variant = &g_build_variant;
37576 gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt);
37577 gcDacVars->generation_size = sizeof(generation);
37578 gcDacVars->max_gen = &g_max_generation;
37579 #ifndef MULTIPLE_HEAPS
37580 gcDacVars->mark_array = &gc_heap::mark_array;
37581 gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment);
37582 gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state);
37583 gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg);
37584 gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start;
37585 gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address;
37586 gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address;
37587 gcDacVars->alloc_allocated = &gc_heap::alloc_allocated;
37588 gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj;
37589 gcDacVars->oom_info = &gc_heap::oom_info;
37590 gcDacVars->finalize_queue = reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue);
37591 gcDacVars->generation_table = reinterpret_cast<dac_generation**>(&gc_heap::generation_table);
37592 #ifdef GC_CONFIG_DRIVEN
37593 gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms);
37594 gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap);
37595 gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap);
37596 gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap);
37597 gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap);
37598 #endif // GC_CONFIG_DRIVEN
37599 #ifdef HEAP_ANALYZE
37600 gcDacVars->internal_root_array = &gc_heap::internal_root_array;
37601 gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index;
37602 gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success;
37603 #endif // HEAP_ANALYZE
37605 gcDacVars->n_heaps = &gc_heap::n_heaps;
37606 gcDacVars->g_heaps = reinterpret_cast<dac_gc_heap***>(&gc_heap::g_heaps);
37607 #endif // MULTIPLE_HEAPS
37608 #endif // DACCESS_COMPILE