BOOL bgc_heap_walk_for_etw_p = FALSE;
#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
-#if defined(FEATURE_REDHAWK)
-#define MAYBE_UNUSED_VAR(v) v = v
-#else
-#define MAYBE_UNUSED_VAR(v)
-#endif // FEATURE_REDHAWK
-
#define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
#define commit_min_th (16*OS_PAGE_SIZE)
uint8_t g_build_variant = 1;
#else
uint8_t g_build_variant = 2;
-#endif // defined(BUILDENV_DEBUG)
+#endif //BUILDENV_DEBUG
VOLATILE(int32_t) g_no_gc_lock = -1;
"sustained_low_latency",
"no_gc"
};
-#endif // defined(DT_LOG) || defined(TRACE_GC)
+#endif //DT_LOG || TRACE_GC
inline
BOOL is_induced (gc_reason reason)
#endif
-#ifdef GC_STATS
-// There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
-// interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
-// Reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
-// comparison with the global min/max.
-GCStatistics g_GCStatistics;
-GCStatistics g_LastGCStatistics;
-
-char* GCStatistics::logFileName = NULL;
-FILE* GCStatistics::logFile = NULL;
-
-void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
-{
-#ifdef BACKGROUND_GC
- if (settings.concurrent)
- {
- bgc.Accumulate((uint32_t)timeInMSec*1000);
- cntBGC++;
- }
- else if (settings.background_p)
- {
- fgc.Accumulate((uint32_t)timeInMSec*1000);
- cntFGC++;
- if (settings.compaction)
- cntCompactFGC++;
- assert(settings.condemned_generation < max_generation);
- cntFGCGen[settings.condemned_generation]++;
- }
- else
-#endif // BACKGROUND_GC
- {
- ngc.Accumulate((uint32_t)timeInMSec*1000);
- cntNGC++;
- if (settings.compaction)
- cntCompactNGC++;
- cntNGCGen[settings.condemned_generation]++;
- }
-
- if (is_induced (settings.reason))
- cntReasons[(int)reason_induced]++;
- else if (settings.stress_induced)
- cntReasons[(int)reason_gcstress]++;
- else
- cntReasons[(int)settings.reason]++;
-
-#ifdef BACKGROUND_GC
- if (settings.concurrent || !settings.background_p)
- {
-#endif // BACKGROUND_GC
- RollOverIfNeeded();
-#ifdef BACKGROUND_GC
- }
-#endif // BACKGROUND_GC
-}
-
-void GCStatistics::Initialize()
-{
- LIMITED_METHOD_CONTRACT;
- // for efficiency sake we're taking a dependency on the layout of a C++ object
- // with a vtable. protect against violations of our premise:
- static_assert(offsetof(GCStatistics, cntDisplay) == sizeof(void*),
- "The first field of GCStatistics follows the pointer sized vtable");
-
- int podOffs = offsetof(GCStatistics, cntDisplay); // offset of the first POD field
- memset((uint8_t*)(&g_GCStatistics)+podOffs, 0, sizeof(g_GCStatistics)-podOffs);
- memset((uint8_t*)(&g_LastGCStatistics)+podOffs, 0, sizeof(g_LastGCStatistics)-podOffs);
-}
-
-void GCStatistics::DisplayAndUpdate()
-{
- LIMITED_METHOD_CONTRACT;
-
- if (logFileName == NULL || logFile == NULL)
- return;
-
- {
- if (cntDisplay == 0)
- fprintf(logFile, "\nGCMix **** Initialize *****\n\n");
-
- fprintf(logFile, "GCMix **** Summary ***** %d\n", cntDisplay);
-
- // NGC summary (total, timing info)
- ngc.DisplayAndUpdate(logFile, "NGC ", &g_LastGCStatistics.ngc, cntNGC, g_LastGCStatistics.cntNGC, msec);
-
- // FGC summary (total, timing info)
- fgc.DisplayAndUpdate(logFile, "FGC ", &g_LastGCStatistics.fgc, cntFGC, g_LastGCStatistics.cntFGC, msec);
-
- // BGC summary
- bgc.DisplayAndUpdate(logFile, "BGC ", &g_LastGCStatistics.bgc, cntBGC, g_LastGCStatistics.cntBGC, msec);
-
- // NGC/FGC break out by generation & compacting vs. sweeping
- fprintf(logFile, "NGC ");
- for (int i = max_generation; i >= 0; --i)
- fprintf(logFile, "gen%d %d (%d). ", i, cntNGCGen[i]-g_LastGCStatistics.cntNGCGen[i], cntNGCGen[i]);
- fprintf(logFile, "\n");
-
- fprintf(logFile, "FGC ");
- for (int i = max_generation-1; i >= 0; --i)
- fprintf(logFile, "gen%d %d (%d). ", i, cntFGCGen[i]-g_LastGCStatistics.cntFGCGen[i], cntFGCGen[i]);
- fprintf(logFile, "\n");
-
- // Compacting vs. Sweeping break out
- int _cntSweep = cntNGC-cntCompactNGC;
- int _cntLastSweep = g_LastGCStatistics.cntNGC-g_LastGCStatistics.cntCompactNGC;
- fprintf(logFile, "NGC Sweeping %d (%d) Compacting %d (%d)\n",
- _cntSweep - _cntLastSweep, _cntSweep,
- cntCompactNGC - g_LastGCStatistics.cntCompactNGC, cntCompactNGC);
-
- _cntSweep = cntFGC-cntCompactFGC;
- _cntLastSweep = g_LastGCStatistics.cntFGC-g_LastGCStatistics.cntCompactFGC;
- fprintf(logFile, "FGC Sweeping %d (%d) Compacting %d (%d)\n",
- _cntSweep - _cntLastSweep, _cntSweep,
- cntCompactFGC - g_LastGCStatistics.cntCompactFGC, cntCompactFGC);
-
-#ifdef TRACE_GC
- // GC reasons...
- for (int reason=(int)reason_alloc_soh; reason <= (int)reason_gcstress; ++reason)
- {
- if (cntReasons[reason] != 0)
- fprintf(logFile, "%s %d (%d). ", str_gc_reasons[reason],
- cntReasons[reason]-g_LastGCStatistics.cntReasons[reason], cntReasons[reason]);
- }
-#endif // TRACE_GC
- fprintf(logFile, "\n\n");
-
- // flush the log file...
- fflush(logFile);
- }
-
- g_LastGCStatistics = *this;
-
- ngc.Reset();
- fgc.Reset();
- bgc.Reset();
-}
-
-#endif // GC_STATS
-
#ifdef BGC_SERVO_TUNING
bool gc_heap::bgc_tuning::enable_fl_tuning = false;
uint32_t gc_heap::bgc_tuning::memory_load_goal = 0;
void c_write (uint32_t& place, uint32_t value)
{
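+    // Exchange rather than a plain store: the interlocked operation makes the
+    // write atomic with full-fence semantics on all architectures.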
Interlocked::Exchange (&place, value);
- //place = value;
}
#ifndef DACCESS_COMPILE
}
#ifdef SYNCHRONIZATION_STATS
-
-// Number of GCs have we done since we last logged.
+// Number of GCs we have done since we last logged.
static unsigned int gc_count_during_log;
// In ms. This is how often we print out stats.
// Cycles accumulated in RestartEE during log_interval.
static uint64_t restart_ee_during_log;
static uint64_t gc_during_log;
-
#endif //SYNCHRONIZATION_STATS
void
join_heap_r_restart = 200
};
-struct join_event
-{
- uint32_t heap;
- join_time time;
- join_type type;
-};
-
class t_join
{
join_structure join_struct;
dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d",
flavor, join_id, color, (int32_t)(join_struct.join_lock)));
- //Thread* current_thread = GCToEEInterface::GetThread();
- //BOOL cooperative_mode = gc_heap::enable_preemptive ();
uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE);
- //gc_heap::disable_preemptive (cooperative_mode);
if (dwJoinWait != WAIT_OBJECT_0)
{
dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id));
join_struct.joined_event[!color].Reset();
id = join_id;
- // this one is alone so it can proceed
#ifdef JOIN_STATS
// remember the join id, the last thread arriving, the start of the sequential phase,
// and keep track of the cycles spent waiting in the join
dprintf (JOIN_LOG, ("r_join() Waiting..."));
//busy wait around the color
- respin:
+respin:
int spin_count = 256 * yp_spin_count_unit;
for (int j = 0; j < spin_count; j++)
{
const double ts_scale = 1e-6;
// enable this printf to get statistics on each individual join as it occurs
-// printf("join #%3d seq_loss = %5g par_loss = %5g efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);
+ //printf("join #%3d seq_loss = %5g par_loss = %5g efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);
elapsed_total[id] += sum;
wake_total[id] += wake;
join_struct.joined_p = FALSE;
join_struct.join_lock = join_struct.n_threads;
dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
-// printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
int color = join_struct.lock_color.LoadWithoutBarrier();
join_struct.lock_color = !color;
join_struct.joined_event[color].Set();
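+        // note the order above: lock_color is flipped before the event is set,
+        // so threads arriving at the next join wait on the other event (reset
+        // earlier) while the current waiters are being released.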
-// printf("Set joined_event %d\n", !join_struct.lock_color);
-
fire_event (join_heap_restart, time_end, type_restart, -1);
#ifdef JOIN_STATS
} \
}
-#ifndef DACCESS_COMPILE
-#ifdef BACKGROUND_GC
+#if defined(BACKGROUND_GC) && !defined(DACCESS_COMPILE)
#define max_pending_allocs 64
}
};
-// Note that this class was written assuming just synchronization between
-// one background GC thread and multiple user threads that might request
-// an FGC - it does not take into account what kind of locks the multiple
-// user threads might be holding at the time (eg, there could only be one
-// user thread requesting an FGC because it needs to take gc_lock first)
-// so you'll see checks that may not be necessary if you take those conditions
-// into consideration.
-//
-// With the introduction of Server Background GC we no longer use this
-// class to do synchronization between FGCs and BGC.
-class recursive_gc_sync
-{
- static VOLATILE(int32_t) foreground_request_count;//initial state 0
- static VOLATILE(BOOL) gc_background_running; //initial state FALSE
- static VOLATILE(int32_t) foreground_count; // initial state 0;
- static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
- static GCEvent foreground_complete;//Auto Reset
- static GCEvent foreground_allowed;//Auto Reset
-public:
- static void begin_background();
- static void end_background();
- static void begin_foreground();
- static void end_foreground();
- BOOL allow_foreground ();
- static BOOL init();
- static void shutdown();
- static BOOL background_running_p() {return gc_background_running;}
-};
-
-VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
-VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
-VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
-VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
-GCEvent recursive_gc_sync::foreground_complete;//Auto Reset
-GCEvent recursive_gc_sync::foreground_allowed;//Manual Reset
-
-BOOL recursive_gc_sync::init ()
-{
- foreground_request_count = 0;
- foreground_count = 0;
- gc_background_running = FALSE;
- foreground_gate = 0;
-
- if (!foreground_complete.CreateOSAutoEventNoThrow(FALSE))
- {
- goto error;
- }
- if (!foreground_allowed.CreateManualEventNoThrow(FALSE))
- {
- goto error;
- }
- return TRUE;
-
-error:
- shutdown();
- return FALSE;
-
-}
-
-void recursive_gc_sync::shutdown()
-{
- if (foreground_complete.IsValid())
- foreground_complete.CloseEvent();
- if (foreground_allowed.IsValid())
- foreground_allowed.CloseEvent();
-}
-
-void recursive_gc_sync::begin_background()
-{
- dprintf (2, ("begin background"));
- foreground_request_count = 1;
- foreground_count = 1;
- foreground_allowed.Reset();
- gc_background_running = TRUE;
-}
-void recursive_gc_sync::end_background()
-{
- dprintf (2, ("end background"));
- gc_background_running = FALSE;
- foreground_gate = 1;
- foreground_allowed.Set();
-}
-
-void recursive_gc_sync::begin_foreground()
-{
- dprintf (2, ("begin_foreground"));
-
- bool cooperative_mode = false;
- if (gc_background_running)
- {
- gc_heap::fire_alloc_wait_event_begin (awr_fgc_wait_for_bgc);
- gc_heap::alloc_wait_event_p = TRUE;
-
-try_again_top:
-
- Interlocked::Increment (&foreground_request_count);
-
-try_again_no_inc:
- dprintf(2, ("Waiting sync gc point"));
- assert (foreground_allowed.IsValid());
- assert (foreground_complete.IsValid());
-
- cooperative_mode = gc_heap::enable_preemptive ();
-
- foreground_allowed.Wait(INFINITE, FALSE);
-
- dprintf(2, ("Waiting sync gc point is done"));
-
- gc_heap::disable_preemptive (cooperative_mode);
-
- if (foreground_gate)
- {
- Interlocked::Increment (&foreground_count);
- dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
- if (foreground_gate)
- {
- gc_heap::settings.concurrent = FALSE;
- return;
- }
- else
- {
- end_foreground();
- goto try_again_top;
- }
- }
- else
- {
- goto try_again_no_inc;
- }
- }
-}
-
-void recursive_gc_sync::end_foreground()
-{
- dprintf (2, ("end_foreground"));
- if (gc_background_running)
- {
- Interlocked::Decrement (&foreground_request_count);
- dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
- if (Interlocked::Decrement (&foreground_count) == 0)
- {
- //c_write ((BOOL*)&foreground_gate, 0);
- // TODO - couldn't make the syntax work with Volatile<T>
- foreground_gate = 0;
- if (foreground_count == 0)
- {
- foreground_allowed.Reset ();
- dprintf(2, ("setting foreground complete event"));
- foreground_complete.Set();
- }
- }
- }
-}
-
-inline
-BOOL recursive_gc_sync::allow_foreground()
-{
- assert (gc_heap::settings.concurrent);
- dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
- (int32_t)foreground_request_count, (int32_t)foreground_count));
-
- BOOL did_fgc = FALSE;
-
- //if we have suspended the EE, just return because
- //some thread could be waiting on this to proceed.
- if (!GCHeap::GcInProgress)
- {
- //TODO BACKGROUND_GC This is to stress the concurrency between
- //background and foreground
-// gc_heap::disallow_new_allocation (0);
-
- //GCToOSInterface::YieldThread(0);
-
- //END of TODO
- if (foreground_request_count != 0)
- {
- //foreground wants to run
- //save the important settings
- //TODO BACKGROUND_GC be more selective about the important settings.
- gc_mechanisms saved_settings = gc_heap::settings;
- do
- {
- did_fgc = TRUE;
- //c_write ((BOOL*)&foreground_gate, 1);
- // TODO - couldn't make the syntax work with Volatile<T>
- foreground_gate = 1;
- foreground_allowed.Set ();
- foreground_complete.Wait (INFINITE, FALSE);
- }while (/*foreground_request_count ||*/ foreground_gate);
-
- assert (!foreground_gate);
-
- //restore the important settings
- gc_heap::settings = saved_settings;
- GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
- //the background GC shouldn't be using gc_high and gc_low
- //gc_low = lowest_address;
- //gc_high = highest_address;
- }
-
- //TODO BACKGROUND_GC This is to stress the concurrency between
- //background and foreground
-// gc_heap::allow_new_allocation (0);
- //END of TODO
- }
-
- dprintf (100, ("leave allow_foreground"));
- assert (gc_heap::settings.concurrent);
- return did_fgc;
-}
-
-#endif //BACKGROUND_GC
-#endif //DACCESS_COMPILE
-
-#ifndef MULTIPLE_HEAPS
-
-#endif // MULTIPLE_HEAPS
+#endif //BACKGROUND_GC && !DACCESS_COMPILE
void reset_memory (uint8_t* o, size_t sizeo);
#ifndef DACCESS_COMPILE
-//check if the write watch APIs are supported.
-
void hardware_write_watch_api_supported()
{
if (GCToOSInterface::SupportsWriteWatch())
dprintf (2,("WriteWatch not supported"));
}
}
-
#endif //!DACCESS_COMPILE
inline bool can_use_hardware_write_watch()
#endif // !DACCESS_COMPILE
typedef void ** PTR_PTR;
-//This function clears a piece of memory
-// size has to be Dword aligned
-
inline
void memclr ( uint8_t* mem, size_t size)
{
dprintf (3, ("MEMCLR: %Ix, %d", mem, size));
assert ((size & (sizeof(PTR_PTR)-1)) == 0);
assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
-
-#if 0
- // The compiler will recognize this pattern and replace it with memset call. We can as well just call
- // memset directly to make it obvious what's going on.
- PTR_PTR m = (PTR_PTR) mem;
- for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
- *(m++) = 0;
-#endif
-
memset (mem, 0, size);
}
const size_t sz2ptr = sizeof(PTR_PTR)*2;
const size_t sz1ptr = sizeof(PTR_PTR)*1;
- // size must be a multiple of the pointer size
assert ((size & (sizeof (PTR_PTR)-1)) == 0);
assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
if (size & sz1ptr)
{
((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
- // dmem += sz1ptr;
- // smem += sz1ptr;
}
-
}
inline
#error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive
#endif
-#if defined(GROWABLE_SEG_MAPPING_TABLE) && !defined(SEG_MAPPING_TABLE)
-#error if GROWABLE_SEG_MAPPING_TABLE is defined, SEG_MAPPING_TABLE must be defined
-#endif
-
-// Returns true if two pointers have the same large (double than normal) alignment.
+// Returns true if two pointers have the same large (twice the normal) alignment.
inline
BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
#endif //SERVER_GC
-//amount in bytes of the etw allocation tick
const size_t etw_allocation_tick = 100*1024;
const size_t low_latency_alloc = 256*1024;
const int max_snoop_level = 128;
#endif //MH_SC_MARK
-
#ifdef CARD_BUNDLE
//threshold of heap size to turn on card bundles.
#define SH_TH_CARD_BUNDLE (40*1024*1024)
return (uint8_t*)align_lower_page ((size_t)add);
}
-
inline
BOOL power_of_two_p (size_t integer)
{
void virtual_free (void* add, size_t size);
/* per heap static initialization */
-#ifdef MARK_ARRAY
-#ifndef MULTIPLE_HEAPS
+#if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
uint32_t* gc_heap::mark_array;
-#endif //MULTIPLE_HEAPS
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC && !MULTIPLE_HEAPS
#ifdef MARK_LIST
uint8_t** gc_heap::g_mark_list;
size_t gc_heap::mark_list_size;
#endif //MARK_LIST
-#ifdef SEG_MAPPING_TABLE
seg_mapping* seg_mapping_table;
-#endif //SEG_MAPPING_TABLE
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
sorted_table* gc_heap::seg_table;
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
#ifdef MULTIPLE_HEAPS
GCEvent gc_heap::ee_suspend_event;
VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
+VOLATILE(BOOL) gc_heap::gc_background_running = FALSE;
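+// Tested via gc_heap::background_running_p(); replaces the flag of the same
+// name on the deleted recursive_gc_sync class.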
#endif //BACKGROUND_GC
#ifndef MULTIPLE_HEAPS
#if defined (_DEBUG) && defined (VERIFY_HEAP)
BOOL gc_heap::verify_pinned_queue_p = FALSE;
-#endif // defined (_DEBUG) && defined (VERIFY_HEAP)
+#endif //_DEBUG && VERIFY_HEAP
uint8_t* gc_heap::oldest_pinned_plug = 0;
uint8_t* gc_heap::background_written_addresses [array_size+2];
-heap_segment* gc_heap::freeable_small_heap_segment = 0;
+heap_segment* gc_heap::freeable_soh_segment = 0;
size_t gc_heap::bgc_overflow_count = 0;
size_t gc_heap::eph_gen_starts_size = 0;
heap_segment* gc_heap::segment_standby_list;
bool gc_heap::use_large_pages_p = 0;
-size_t gc_heap::last_gc_index = 0;
#ifdef HEAP_BALANCE_INSTRUMENTATION
size_t gc_heap::last_gc_end_time_ms = 0;
#endif //HEAP_BALANCE_INSTRUMENTATION
-#ifdef SEG_MAPPING_TABLE
size_t gc_heap::min_segment_size = 0;
size_t gc_heap::min_segment_size_shr = 0;
-#endif //SEG_MAPPING_TABLE
size_t gc_heap::soh_segment_size = 0;
size_t gc_heap::min_uoh_segment_size = 0;
size_t gc_heap::segment_info_size = 0;
return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
}
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
// The array we allocate is organized as follows:
// 0th element is the address of the last array we allocated.
// starting from the 1st element are the segment addresses, that's
{
if (buck[ti-1].add <= add)
{
- // found the guy to remove
for (ptrdiff_t k = ti; k < count; k++)
buck[k-1] = buck[k];
count--;
{
if (buck[ti+1].add > add)
{
- // found the guy to remove
for (ptrdiff_t k = ti+1; k < count; k++)
buck[k-1] = buck[k];
count--;
count = 1;
buckets()[0].add = MAX_PTR;
}
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
-#ifdef SEG_MAPPING_TABLE
-#ifdef GROWABLE_SEG_MAPPING_TABLE
inline
uint8_t* align_on_segment (uint8_t* add)
{
{
return (size_t)add >> gc_heap::min_segment_size_shr;
}
-#else //GROWABLE_SEG_MAPPING_TABLE
-BOOL seg_mapping_table_init()
-{
-#ifdef HOST_64BIT
- uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024;
-#else
- uint64_t total_address_space = (uint64_t)4*1024*1024*1024;
-#endif // HOST_64BIT
-
- size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr);
- seg_mapping_table = new seg_mapping[num_entries];
-
- if (seg_mapping_table)
- {
- memset (seg_mapping_table, 0, num_entries * sizeof (seg_mapping));
- dprintf (1, ("created %d entries for heap mapping (%Id bytes)",
- num_entries, (num_entries * sizeof (seg_mapping))));
- return TRUE;
- }
- else
- {
- dprintf (1, ("failed to create %d entries for heap mapping (%Id bytes)",
- num_entries, (num_entries * sizeof (seg_mapping))));
- return FALSE;
- }
-}
-#endif //GROWABLE_SEG_MAPPING_TABLE
#ifdef FEATURE_BASICFREEZE
inline
void seg_mapping_table_add_ro_segment (heap_segment* seg)
{
-#ifdef GROWABLE_SEG_MAPPING_TABLE
if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
return;
-#endif //GROWABLE_SEG_MAPPING_TABLE
for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++)
seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry);
seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
#endif //FEATURE_BASICFREEZE
+#ifdef TRACE_GC
if (seg)
{
if (in_range_for_segment (o, seg))
{
dprintf (2, ("could not find obj %Ix in any existing segments", o));
}
+#endif //TRACE_GC
#endif //_DEBUG
return hp;
gc_heap* seg_mapping_table_heap_of (uint8_t* o)
{
-#ifdef GROWABLE_SEG_MAPPING_TABLE
if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
return seg_mapping_table_heap_of_worker (o);
}
gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
{
-#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
+#ifdef FEATURE_BASICFREEZE
if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return 0;
-#endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
+#endif //FEATURE_BASICFREEZE
return seg_mapping_table_heap_of_worker (o);
}
// Only returns a valid seg if we can actually find o on the seg.
heap_segment* seg_mapping_table_segment_of (uint8_t* o)
{
-#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
- if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
#ifdef FEATURE_BASICFREEZE
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return ro_segment_lookup (o);
-#else
- return 0;
#endif //FEATURE_BASICFREEZE
-#endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
seg_mapping* entry = &seg_mapping_table[index];
return seg;
}
-#endif //SEG_MAPPING_TABLE
size_t gcard_of ( uint8_t*);
return ((ArrayBase *)this)->GetNumComponents();
}
- void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
+ void Validate(BOOL bDeep=TRUE)
{
- UNREFERENCED_PARAMETER(bVerifyNextHeader);
-
MethodTable * pMT = GetMethodTable();
_ASSERTE(pMT->SanityCheck());
}
}
- void ValidatePromote(ScanContext *sc, uint32_t flags)
+ void ValidateHeap(BOOL bDeep)
{
- UNREFERENCED_PARAMETER(sc);
- UNREFERENCED_PARAMETER(flags);
-
- Validate();
- }
-
- void ValidateHeap(Object *from, BOOL bDeep)
- {
- UNREFERENCED_PARAMETER(from);
-
- Validate(bDeep, FALSE);
+ Validate(bDeep);
}
#endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE
seg_size = initial_seg_size;
}
-#ifdef SEG_MAPPING_TABLE
#ifdef HOST_64BIT
seg_size = round_up_power2 (seg_size);
#else
seg_size = round_down_power2 (seg_size);
#endif // HOST_64BIT
-#endif //SEG_MAPPING_TABLE
return (seg_size);
}
if ((settings.pause_mode != pause_low_latency) &&
(settings.pause_mode != pause_no_gc)
#ifdef BACKGROUND_GC
- && (!recursive_gc_sync::background_running_p())
+ && (!gc_heap::background_running_p())
#endif //BACKGROUND_GC
)
{
}
#endif //BACKGROUND_GC
-#ifdef SEG_MAPPING_TABLE
if (result)
seg_mapping_table_add_segment (result, __this);
-#endif //SEG_MAPPING_TABLE
}
if (!result)
{
-#ifndef SEG_MAPPING_TABLE
- if (!seg_table->ensure_space_for_insert ())
- return 0;
-#endif //SEG_MAPPING_TABLE
void* mem = virtual_alloc (size);
if (!mem)
{
if (result)
{
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (result, __this);
-#else //SEG_MAPPING_TABLE
- gc_heap::seg_table->insert ((uint8_t*)result, delta);
-#endif //SEG_MAPPING_TABLE
}
}
return res;
}
-#if 0
-BOOL gc_heap::unprotect_segment (heap_segment* seg)
-{
- uint8_t* start = align_lower_page (heap_segment_mem (seg));
- ptrdiff_t region_size = heap_segment_allocated (seg) - start;
-
- if (region_size != 0 )
- {
- dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
-
- BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
- assert (status);
- return status;
- }
- return FALSE;
-}
-#endif
#ifdef MULTIPLE_HEAPS
#ifdef HOST_X86
static int select_heap(alloc_context* acontext)
{
+#ifndef TRACE_GC
UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf
+#endif //!TRACE_GC
if (GCToOSInterface::CanGetCurrentProcessorNumber())
{
return true;
}
}
-#else
+#else //MULTIPLE_HEAPS && !FEATURE_REDHAWK
UNREFERENCED_PARAMETER(h_number);
-#endif
+#endif //MULTIPLE_HEAPS && !FEATURE_REDHAWK
//numa aware not enabled, or call failed --> fallback to VirtualCommit()
return GCToOSInterface::VirtualCommit(addr, size);
return decommit_succeeded_p;
}
-#ifndef SEG_MAPPING_TABLE
-inline
-heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p)
-{
- uint8_t* sadd = add;
- heap_segment* hs = 0;
- heap_segment* hs1 = 0;
- if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
- {
- delta = 0;
- return 0;
- }
- //repeat in case there is a concurrent insertion in the table.
- do
- {
- hs = hs1;
- sadd = add;
- seg_table->lookup (sadd);
- hs1 = (heap_segment*)sadd;
- } while (hs1 && !in_range_for_segment (add, hs1) && (hs != hs1));
-
- hs = hs1;
-
- if ((hs == 0) ||
- (verify_p && (add > heap_segment_reserved ((heap_segment*)(sadd + delta)))))
- delta = 0;
- return hs;
-}
-#endif //SEG_MAPPING_TABLE
-
class mark
{
public:
elevation_reduced = FALSE;
found_finalizers = FALSE;
#ifdef BACKGROUND_GC
- background_p = recursive_gc_sync::background_running_p() != FALSE;
+ background_p = gc_heap::background_running_p() != FALSE;
allocations_allowed = TRUE;
#endif //BACKGROUND_GC
-//for_gc_p indicates that the work is being done for GC,
-//as opposed to concurrent heap verification
-void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
+void gc_heap::fix_youngest_allocation_area()
{
- UNREFERENCED_PARAMETER(for_gc_p);
-
// The gen 0 alloc context is never used for allocation in the allocator path. It's
// still used in the allocation path during GCs.
assert (generation_allocation_pointer (youngest_generation) == nullptr);
heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
}
-void gc_heap::fix_uoh_allocation_area (BOOL for_gc_p)
+void gc_heap::fix_uoh_allocation_area()
{
- UNREFERENCED_PARAMETER(for_gc_p);
-
for (int i = uoh_start_generation; i < total_generation_count; i++)
{
#ifdef _DEBUG
args.heap = __this;
GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
- fix_youngest_allocation_area(for_gc_p);
- fix_uoh_allocation_area(for_gc_p);
+ fix_youngest_allocation_area();
+ fix_uoh_allocation_area();
}
void gc_heap::fix_older_allocation_area (generation* older_gen)
set_allocation_heap_segment (gen);
}
-#ifdef BACKGROUND_GC
-//TODO BACKGROUND_GC this is for test only
-void
-gc_heap::disallow_new_allocation (int gen_number)
-{
- UNREFERENCED_PARAMETER(gen_number);
- settings.allocations_allowed = FALSE;
-}
-void
-gc_heap::allow_new_allocation (int gen_number)
-{
- UNREFERENCED_PARAMETER(gen_number);
- settings.allocations_allowed = TRUE;
-}
-
-#endif //BACKGROUND_GC
-
bool gc_heap::new_allocation_allowed (int gen_number)
{
#ifdef BACKGROUND_GC
}
}
-void gc_heap::set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit)
-{
- dprintf (3, ("sanp: ptr: %Ix, limit: %Ix", alloc_pointer, alloc_limit));
- dprintf (3, ("oldest %Id: %Ix", mark_stack_bos, pinned_plug (oldest_pin())));
- if (!(pinned_plug_que_empty_p()))
- {
- mark* oldest_entry = oldest_pin();
- uint8_t* plug = pinned_plug (oldest_entry);
- if ((plug >= alloc_pointer) && (plug < alloc_limit))
- {
- alloc_limit = pinned_plug (oldest_entry);
- dprintf (3, ("now setting alloc context: %Ix->%Ix(%Id)",
- alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer)));
- }
- }
-}
-
void gc_heap::set_allocator_next_pin (generation* gen)
{
dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen)));
}
// After we set the info, we increase tos.
-void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
-{
- UNREFERENCED_PARAMETER(last_pinned_plug);
-
- mark& m = mark_stack_array[mark_stack_tos];
- assert (m.first == last_pinned_plug);
-
- m.len = plug_len;
- mark_stack_tos++;
- set_allocator_next_pin (alloc_pointer, alloc_limit);
-}
-
-// After we set the info, we increase tos.
void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
{
+#ifndef _DEBUG
UNREFERENCED_PARAMETER(last_pinned_plug);
+#endif //!_DEBUG
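+    // last_pinned_plug is only read by the assert below, so it would otherwise
+    // be unreferenced in non-debug builds.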
mark& m = mark_stack_array[mark_stack_tos];
assert (m.first == last_pinned_plug);
// mark_array is always at the end of the data structure because we
// want to be able to make one commit call for everything before it.
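+// The overall layout of that single allocation is:
+//   card_table_info | card table | brick table | [card bundle table]
+//   | [sw write watch table] | seg mapping table | [mark array]
+// where the bracketed parts exist only in the corresponding configurations.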
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
uint32_t* mark_array;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
size_t size;
uint32_t* next_card_table;
}
#endif //CARD_BUNDLE
-#ifdef MARK_ARRAY
-/* Support for mark_array */
-
+#ifdef BACKGROUND_GC
inline
uint32_t*& card_table_mark_array (uint32_t* c_table)
{
if ((end <= current_highest_address) && (from >= current_lowest_address))
{
size_t beg_word = mark_word_of (align_on_mark_word (from));
- MAYBE_UNUSED_VAR(beg_word);
//align end word to make sure to cover the address
size_t end_word = mark_word_of (align_on_mark_word (end));
- MAYBE_UNUSED_VAR(end_word);
dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
(size_t)mark_word_address (beg_word),
(size_t)mark_word_address (end_word),
#endif //_DEBUG
}
}
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
//These work on untranslated card tables
inline
size_t bs = size_brick_of (start, end);
size_t cs = size_card_of (start, end);
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
size_t ms = (gc_can_use_concurrent ?
size_mark_array_of (start, end) :
0);
#else
size_t ms = 0;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
size_t cb = 0;
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-#ifdef GROWABLE_SEG_MAPPING_TABLE
size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
st += (st_table_offset_aligned - st_table_offset);
-#else //GROWABLE_SEG_MAPPING_TABLE
- size_t st = 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
-    // it is impossible for alloc_size to overflow due bounds on each of
+    // it is impossible for alloc_size to overflow due to bounds on each of
     // its components.
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-#ifdef GROWABLE_SEG_MAPPING_TABLE
seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
-#endif //GROWABLE_SEG_MAPPING_TABLE
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
if (gc_can_use_concurrent)
card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
else
card_table_mark_array (ct) = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
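+    // the translation below offsets the pointer so the table can be indexed
+    // directly with card_word (card_of (address)) for any address in range.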
return translate_card_table(ct);
}
size_t cs = size_card_of (saved_g_lowest_address, saved_g_highest_address);
size_t bs = size_brick_of (saved_g_lowest_address, saved_g_highest_address);
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
size_t ms = (gc_heap::gc_can_use_concurrent ?
size_mark_array_of (saved_g_lowest_address, saved_g_highest_address) :
0);
#else
size_t ms = 0;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
size_t cb = 0;
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-#ifdef GROWABLE_SEG_MAPPING_TABLE
size_t st = size_seg_mapping_table_of (saved_g_lowest_address, saved_g_highest_address);
size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
st += (st_table_offset_aligned - st_table_offset);
-#else //GROWABLE_SEG_MAPPING_TABLE
- size_t st = 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
-    // it is impossible for alloc_size to overflow due bounds on each of
+    // it is impossible for alloc_size to overflow due to bounds on each of
     // its components.
memset(card_table_card_bundle_table (ct), 0xFF, cb);
#endif //CARD_BUNDLE
-#ifdef GROWABLE_SEG_MAPPING_TABLE
- {
- new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
- new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
- size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
- memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
- &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
- size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
+ new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
+ new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
+ size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
+ memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+ &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+ size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
- // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
- // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
- // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
- // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
- // if an OOM occurs.
- }
-#endif //GROWABLE_SEG_MAPPING_TABLE
+ // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
+ // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
+ // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
+ // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
+ // if an OOM occurs.
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
if(gc_can_use_concurrent)
card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
else
card_table_mark_array (ct) = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
translated_ct = translate_card_table (ct);
return 0;
fail:
- //cleanup mess and return -1;
-
if (mem)
{
assert(g_gc_card_table == saved_g_card_table);
//copy all of the arrays managed by the card table for a page aligned range
void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
short* old_brick_table,
- heap_segment* seg,
uint8_t* start, uint8_t* end)
{
ptrdiff_t brick_offset = brick_of (start) - brick_of (la);
}
uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
-#ifdef MARK_ARRAY
-#ifdef BACKGROUND_GC
- UNREFERENCED_PARAMETER(seg);
- if (recursive_gc_sync::background_running_p())
+
+ if (gc_heap::background_running_p())
{
uint32_t* old_mark_array = card_table_mark_array (old_ct);
assert (old_brick_table == 0);
}
}
-#else //BACKGROUND_GC
- assert (seg != 0);
- clear_mark_array (start, heap_segment_committed(seg));
-#endif //BACKGROUND_GC
-#endif //MARK_ARRAY
-    // n way merge with all of the card table ever used in between
+    // n way merge with all of the card tables ever used in between
uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]);
set_brick (b, -1);
}
-#ifdef MARK_ARRAY
- if (recursive_gc_sync::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
+#ifdef BACKGROUND_GC
+ if (gc_heap::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
{
assert (seg != 0);
clear_mark_array (heap_segment_mem (seg), heap_segment_committed(seg));
}
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
clear_card_for_addresses (heap_segment_mem (seg),
heap_segment_allocated (seg));
void gc_heap::copy_brick_card_table()
{
- uint8_t* la = lowest_address;
- uint8_t* ha = highest_address;
- MAYBE_UNUSED_VAR(ha);
uint32_t* old_card_table = card_table;
short* old_brick_table = brick_table;
+ uint8_t* la = lowest_address;
+#ifdef _DEBUG
+ uint8_t* ha = highest_address;
assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))]));
assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
+#endif //_DEBUG
/* todo: Need a global lock for this */
uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
brick_table = card_table_brick_table (ct);
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
if (gc_can_use_concurrent)
{
mark_array = translate_mark_array (card_table_mark_array (ct));
}
else
mark_array = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
#ifdef CARD_BUNDLE
-#if defined(MARK_ARRAY) && defined(_DEBUG)
- size_t cb_end = (size_t)((uint8_t*)card_table_card_bundle_table (ct) + size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address));
-#ifdef GROWABLE_SEG_MAPPING_TABLE
- size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
- size_t cb_end_aligned = align_for_seg_mapping_table (cb_end);
- st += (cb_end_aligned - cb_end);
-#else //GROWABLE_SEG_MAPPING_TABLE
- size_t st = 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
-#endif //MARK_ARRAY && _DEBUG
card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
// Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
uint8_t* end = align_on_page (heap_segment_allocated (seg));
copy_brick_card_range (la, old_card_table,
(i < uoh_start_generation) ? old_brick_table : NULL,
- seg,
align_lower_page (heap_segment_mem (seg)),
end);
}
seg_table->insert (heap_segment_mem(seg), (size_t)seg);
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_ro_segment (seg);
-#endif //SEG_MAPPING_TABLE
- //test if in range
if ((heap_segment_reserved (seg) > lowest_address) &&
(heap_segment_mem (seg) < highest_address))
{
void gc_heap::remove_ro_segment (heap_segment* seg)
{
//clear the mark bits so a new segment allocated in its place will have a clear mark bits
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
if (gc_can_use_concurrent)
{
clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)),
align_on_card_word (min (heap_segment_allocated (seg), highest_address)),
false); // read_only segments need the mark clear
}
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
enter_spin_lock (&gc_heap::gc_lock);
seg_table->remove ((uint8_t*)seg);
-
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_remove_ro_segment (seg);
-#endif //SEG_MAPPING_TABLE
// Locate segment (and previous segment) in the list.
generation* gen2 = generation_of (max_generation);
BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg)
{
- //set it in range
seg->flags |= heap_segment_flags_inrange;
-// init_brick_card_range (seg);
ro_segments_in_range = TRUE;
- //right now, segments aren't protected
- //unprotect_segment (seg);
return TRUE;
}
gc_heap* heap;
// find the heap x points into - searching cyclically from the last heap,
// because in many cases the right heap is the next one or comes soon after
+#ifdef _DEBUG
int last_heap_num = heap_num;
- MAYBE_UNUSED_VAR(last_heap_num);
+#endif //_DEBUG
do
{
heap_num++;
#define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
#endif //COLLECTIBLE_CLASS
-#if defined (MARK_ARRAY) && defined (BACKGROUND_GC)
+#ifdef BACKGROUND_GC
inline
void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg)
{
clear_batch_mark_array_bits (start, end);
}
}
-
-void gc_heap::clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p)
-{
- dprintf (3, ("clearing mark array bits by objects for addr [%Ix,[%Ix",
- from, end));
- int align_const = get_alignment_constant (!loh_p);
-
- uint8_t* o = from;
-
- while (o < end)
- {
- uint8_t* next_o = o + Align (size (o), align_const);
-
- if (background_object_marked (o, TRUE))
- {
- dprintf (3, ("%Ix was marked by bgc, is now cleared", o));
- }
-
- o = next_o;
- }
-}
-#endif //MARK_ARRAY && BACKGROUND_GC
+#endif //BACKGROUND_GC
inline
BOOL gc_heap::is_mark_set (uint8_t* o)
{
size_t initial_commit = SEGMENT_INITIAL_COMMIT;
- //Commit the first page
if (!virtual_commit (new_pages, initial_commit, h_number))
{
return 0;
}
- //overlay the heap_segment
heap_segment* new_segment = (heap_segment*)new_pages;
uint8_t* start = new_pages + segment_info_size;
decommit_heap_segment (seg);
}
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_remove_segment (seg);
-#endif //SEG_MAPPING_TABLE
heap_segment_next (seg) = segment_standby_list;
segment_standby_list = seg;
decommit_mark_array_by_seg (seg);
#endif //BACKGROUND_GC
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_remove_segment (seg);
-#else //SEG_MAPPING_TABLE
- seg_table->remove ((uint8_t*)seg);
-#endif //SEG_MAPPING_TABLE
-
release_segment (seg);
}
}
#ifdef BACKGROUND_GC
void gc_heap::rearrange_small_heap_segments()
{
- heap_segment* seg = freeable_small_heap_segment;
+ heap_segment* seg = freeable_soh_segment;
while (seg)
{
heap_segment* next_seg = heap_segment_next (seg);
delete_heap_segment (seg, FALSE);
seg = next_seg;
}
- freeable_small_heap_segment = 0;
+ freeable_soh_segment = 0;
}
#endif //BACKGROUND_GC
#ifdef WRITE_WATCH
-
uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch
#ifdef CARD_BUNDLE
fire_alloc_wait_event (awr, FALSE);
}
#endif //BACKGROUND_GC
-void gc_heap::make_generation (int gen_num, heap_segment* seg, uint8_t* start, uint8_t* pointer)
+void gc_heap::make_generation (int gen_num, heap_segment* seg, uint8_t* start)
{
generation* gen = generation_of (gen_num);
gen->gen_num = gen_num;
gen->allocation_start = start;
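+    // every caller passed 0 for the old 'pointer' argument, so a generation's
+    // alloc context now simply starts out empty.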
- gen->allocation_context.alloc_ptr = pointer;
- gen->allocation_context.alloc_limit = pointer;
+ gen->allocation_context.alloc_ptr = 0;
+ gen->allocation_context.alloc_limit = 0;
gen->allocation_context.alloc_bytes = 0;
gen->allocation_context.alloc_bytes_uoh = 0;
- gen->allocation_context_start_region = pointer;
+ gen->allocation_context_start_region = 0;
gen->start_segment = seg;
gen->allocation_segment = seg;
gen->plan_allocation_start = 0;
}
#endif //GC_CONFIG_DRIVEN
-#ifdef GC_STATS
- GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
- if (logFileName.Get() != nullptr)
- {
- GCStatistics::logFileName = _strdup(logFileName.Get());
- GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
- if (!GCStatistics::logFile)
- {
- return E_FAIL;
- }
- }
-#endif // GC_STATS
-
HRESULT hres = S_OK;
#ifdef WRITE_WATCH
}
#endif //MARK_LIST
-#if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
- if (!seg_mapping_table_init())
- goto cleanup;
-#endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
-
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
seg_table = sorted_table::make_sorted_table();
if (!seg_table)
goto cleanup;
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
segment_standby_list = 0;
memset (full_gc_counts, 0, sizeof (full_gc_counts));
- last_gc_index = 0;
should_expand_in_full_gc = FALSE;
#ifdef FEATURE_LOH_COMPACTION
spinlock_info_index = 0;
}
#else
- MAYBE_UNUSED_VAR(enter_state);
- MAYBE_UNUSED_VAR(take_state);
+ UNREFERENCED_PARAMETER(enter_state);
+ UNREFERENCED_PARAMETER(take_state);
#endif //SPINLOCK_HISTORY
}
gc_done_event_lock = -1;
gc_done_event_set = false;
-#ifndef SEG_MAPPING_TABLE
- if (!gc_heap::seg_table->ensure_space_for_insert ())
- {
- return 0;
- }
-#endif //!SEG_MAPPING_TABLE
-
heap_segment* seg = make_initial_segment (soh_gen0, h_number);
if (!seg)
return 0;
(size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
gc_etw_segment_small_object_heap);
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (seg, __this);
-#else //SEG_MAPPING_TABLE
- seg_table->insert ((uint8_t*)seg, sdelta);
-#endif //SEG_MAPPING_TABLE
#ifdef MULTIPLE_HEAPS
heap_segment_heap (seg) = this;
card_table_card_bundle_table (ct));
#endif //CARD_BUNDLE
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
if (gc_can_use_concurrent)
mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
else
mark_array = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
uint8_t* start = heap_segment_mem (seg);
for (int i = max_generation; i >= 0; i--)
{
- make_generation (i, seg, start, 0);
+ make_generation (i, seg, start);
start += Align (min_obj_size);
}
ephemeral_heap_segment = seg;
-#ifndef SEG_MAPPING_TABLE
- if (!gc_heap::seg_table->ensure_space_for_insert ())
- {
- return 0;
- }
-#endif //!SEG_MAPPING_TABLE
-
-
// Create segments for the large and pinned generations
heap_segment* lseg = make_initial_segment(loh_generation, h_number);
if (!lseg)
(size_t)(heap_segment_reserved (pseg) - heap_segment_mem(pseg)),
gc_etw_segment_pinned_object_heap);
-#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (lseg, __this);
seg_mapping_table_add_segment (pseg, __this);
-#else //SEG_MAPPING_TABLE
- seg_table->insert ((uint8_t*)lseg, sdelta);
- seg_table->insert ((uint8_t*)pseg, sdelta);
-#endif //SEG_MAPPING_TABLE
- make_generation (loh_generation, lseg, heap_segment_mem (lseg), 0);
- make_generation (poh_generation, pseg, heap_segment_mem (pseg), 0);
+ make_generation (loh_generation, lseg, heap_segment_mem (lseg));
+ make_generation (poh_generation, pseg, heap_segment_mem (pseg));
heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;
#endif //MULTIPLE_HEAPS
- //Do this only once
-#ifdef MULTIPLE_HEAPS
- if (h_number == 0)
-#endif //MULTIPLE_HEAPS
- {
-#ifndef INTERIOR_POINTERS
- //set the brick_table for large objects
- //but default value is cleared
- //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
- // (uint8_t*)heap_segment_reserved (lseg));
-
-#else //INTERIOR_POINTERS
-
- //Because of the interior pointer business, we have to clear
- //the whole brick table
- //but the default value is cleared
- // clear_brick_table (lowest_address, highest_address);
-#endif //INTERIOR_POINTERS
- }
-
if (!init_dynamic_data())
{
return 0;
loh_a_bgc_planning = 0;
bgc_maxgen_end_fl_size = 0;
#endif //BGC_SERVO_TUNING
- freeable_small_heap_segment = 0;
+ freeable_soh_segment = 0;
gchist_index_per_heap = 0;
uint8_t** b_arr = new (nothrow) (uint8_t* [MARK_STACK_INITIAL_LENGTH]);
if (!b_arr)
);
}
-#ifdef MARK_ARRAY
- // why would we clear the mark array for this page? it should be cleared..
- // clear the first committed page
- //if(gc_can_use_concurrent)
- //{
- // clear_mark_array (align_lower_page (heap_segment_mem (seg)), heap_segment_committed (seg));
- //}
-#endif //MARK_ARRAY
-
#ifdef MULTIPLE_HEAPS
get_proc_and_numa_for_heap (heap_number);
if (!create_gc_thread ())
}
bgc_alloc_lock->init();
-
- if (h_number == 0)
- {
- if (!recursive_gc_sync::init())
- return 0;
- }
-
bgc_thread_running = 0;
bgc_thread = 0;
bgc_threads_timeout_cs.Initialize();
delete g_mark_list;
#endif //MARK_LIST
-#if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
if (seg_mapping_table)
delete seg_mapping_table;
-#endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
//destroy the segment map
seg_table->delete_sorted_table();
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
}
void
bool ret = virtual_commit (heap_segment_committed (seg), c_size, heap_number, hard_limit_exceeded_p);
if (ret)
{
-#ifdef MARK_ARRAY
-#ifndef BACKGROUND_GC
- clear_mark_array (heap_segment_committed (seg),
- heap_segment_committed (seg)+c_size, TRUE);
-#endif //BACKGROUND_GC
-#endif //MARK_ARRAY
heap_segment_committed (seg) += c_size;
STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix\n",
}
//used only in older generation allocation (i.e during gc).
-void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
- int gennum)
+void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen)
{
- UNREFERENCED_PARAMETER(gennum);
dprintf (3, ("gc Expanding segment allocation"));
heap_segment* seg = generation_allocation_segment (gen);
if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
//memory. Let's catch up here
if (heap_segment_used (seg) < (alloc_allocated - plug_skew))
{
-#ifdef MARK_ARRAY
-#ifndef BACKGROUND_GC
- clear_mark_array (heap_segment_used (seg) + plug_skew, alloc_allocated);
-#endif //BACKGROUND_GC
-#endif //MARK_ARRAY
heap_segment_used (seg) = alloc_allocated - plug_skew;
}
}
}
}
-void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
+void gc_heap::handle_oom (oom_reason reason, size_t alloc_size,
uint8_t* allocated, uint8_t* reserved)
{
- UNREFERENCED_PARAMETER(heap_num);
-
if (reason == oom_budget)
{
alloc_size = dd_min_size (dynamic_data_of (0)) / 2;
// by the time the GC happens, we may not be under BGC anymore. If we try to
// predict often enough it should be ok.
if ((n == max_generation) &&
- (recursive_gc_sync::background_running_p()))
+ (gc_heap::background_running_p()))
{
n = max_generation - 1;
dprintf (2, ("FGN: bgc - 1 instead of 2"));
// DTREVIEW - we should check this in dt_low_ephemeral_space_p
// as well.
inline
-BOOL gc_heap::short_on_end_of_seg (int gen_number,
- heap_segment* seg,
- int align_const)
+BOOL gc_heap::short_on_end_of_seg (heap_segment* seg, int align_const)
{
- UNREFERENCED_PARAMETER(gen_number);
uint8_t* allocated = heap_segment_allocated(seg);
BOOL sufficient_p = sufficient_space_end_seg (allocated,
void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p)
{
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
uint32_t memory_load;
get_memory_info (&memory_load);
{
if (short_seg_end_p)
{
- *short_seg_end_p = short_on_end_of_seg (gen_number, ephemeral_heap_segment, align_const);
+ *short_seg_end_p = short_on_end_of_seg (ephemeral_heap_segment, align_const);
}
// If the caller doesn't care, we always try to fit at the end of seg;
// otherwise we would only try if we are actually not short at end of seg.
int align_const)
{
#if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
background_soh_alloc_count++;
if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0)
align_const, &commit_failed_p,
&short_seg_end_p);
#ifdef BACKGROUND_GC
- bgc_in_progress_p = recursive_gc_sync::background_running_p();
+ bgc_in_progress_p = gc_heap::background_running_p();
#endif //BACKGROUND_GC
if (can_use_existing_p)
if (soh_alloc_state == a_state_cant_allocate)
{
assert (oom_r != oom_no_failure);
- handle_oom (heap_number,
- oom_r,
+ handle_oom (oom_r,
size,
heap_segment_allocated (ephemeral_heap_segment),
heap_segment_reserved (ephemeral_heap_segment));
size_t gc_heap::get_uoh_seg_size (size_t size)
{
size_t default_seg_size = min_uoh_segment_size;
-#ifdef SEG_MAPPING_TABLE
size_t align_size = default_seg_size;
-#else //SEG_MAPPING_TABLE
- size_t align_size = default_seg_size / 2;
-#endif //SEG_MAPPING_TABLE
int align_const = get_alignment_constant (FALSE);
size_t large_seg_size = align_on_page (
max (default_seg_size,
BOOL gc_heap::uoh_get_new_seg (int gen_number,
size_t size,
- int align_const,
BOOL* did_full_compact_gc,
oom_reason* oom_r)
{
- UNREFERENCED_PARAMETER(align_const);
-
*did_full_compact_gc = FALSE;
size_t seg_size = get_uoh_seg_size (size);
BOOL bgc_in_progress = FALSE;
*did_full_compact_gc = FALSE;
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
bgc_in_progress = TRUE;
size_t last_full_compact_gc_count = get_full_compact_gc_count();
commit_failed_p, oom_r);
#ifdef BACKGROUND_GC
- if (can_allocate && recursive_gc_sync::background_running_p())
+ if (can_allocate && gc_heap::background_running_p())
{
if (gen_number == poh_generation)
{
}
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p);
dprintf (2, ("waited for BGC - done"));
{
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
#ifdef BGC_SERVO_TUNING
bool planning_p = (current_c_gc_state == c_gc_state_planning);
current_full_compact_gc_count = get_full_compact_gc_count();
- can_get_new_seg_p = uoh_get_new_seg (gen_number, size, align_const, &did_full_compacting_gc, &oom_r);
+ can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r);
uoh_alloc_state = (can_get_new_seg_p ?
a_state_try_fit_new_seg :
(did_full_compacting_gc ?
current_full_compact_gc_count = get_full_compact_gc_count();
- can_get_new_seg_p = uoh_get_new_seg (gen_number, size, align_const, &did_full_compacting_gc, &oom_r);
+ can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r);
// Since we release the msl before we try to allocate a seg, other
// threads could have allocated a bunch of segments before us so
// we might need to retry.
current_full_compact_gc_count = get_full_compact_gc_count();
- can_get_new_seg_p = uoh_get_new_seg (gen_number, size, align_const, &did_full_compacting_gc, &oom_r);
+ can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r);
uoh_alloc_state = (can_get_new_seg_p ?
a_state_try_fit_new_seg :
(did_full_compacting_gc ?
}
else
{
- handle_oom (heap_number,
- oom_r,
+ handle_oom (oom_r,
size,
0,
0);
void gc_heap::leave_allocation_segment (generation* gen)
{
- adjust_limit (0, 0, gen, max_generation);
+ adjust_limit (0, 0, gen);
}
void gc_heap::init_free_and_plug()
generation_free_list_space (gen) -= free_list_size;
remove_gen_free (gen->gen_num, free_list_size);
- adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
+ adjust_limit (free_list, free_list_size, gen);
generation_allocate_end_seg_p (gen) = FALSE;
goto finished;
}
{
dprintf (3, ("using what's left in committed"));
adjust_limit (heap_segment_plan_allocated (seg),
- heap_segment_committed (seg) -
- heap_segment_plan_allocated (seg),
- gen, from_gen_number+1);
+ (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)),
+ gen);
generation_allocate_end_seg_p (gen) = TRUE;
// dformat (t, 3, "Expanding segment allocation");
heap_segment_plan_allocated (seg) =
{
dprintf (3, ("using what's left in reserved"));
adjust_limit (heap_segment_plan_allocated (seg),
- heap_segment_committed (seg) -
- heap_segment_plan_allocated (seg),
- gen, from_gen_number+1);
+ (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)),
+ gen);
generation_allocate_end_seg_p (gen) = TRUE;
heap_segment_plan_allocated (seg) =
heap_segment_committed (seg);
*blocking_collection_p = TRUE;
}
- if ((n < max_generation) && !recursive_gc_sync::background_running_p() &&
+ if ((n < max_generation) && !gc_heap::background_running_p() &&
bgc_tuning::stepping_trigger (settings.entry_memory_load, get_current_gc_index (max_generation)))
{
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_initial);
g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
{
#ifndef FEATURE_REDHAWK
- // for the GC stress mix mode throttle down gen2 collections
- if (g_pConfig->IsGCStressMix())
- {
- size_t current_gc_count = 0;
-
-#ifdef MULTIPLE_HEAPS
- current_gc_count = (size_t)dd_collection_count (g_heaps[0]->dynamic_data_of (0));
-#else
- current_gc_count = (size_t)dd_collection_count (dynamic_data_of (0));
-#endif //MULTIPLE_HEAPS
- // in gc stress, only escalate every 10th non-gen2 collection to a gen2...
- if ((current_gc_count % 10) == 0)
- {
- gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_stress_mix);
- n = max_generation;
- }
- }
- // for traditional GC stress
- else if (*blocking_collection_p)
+ if (*blocking_collection_p)
{
// We call StressHeap() a lot for Concurrent GC Stress. However,
// if we can not do a concurrent collection, no need to stress anymore.
temp_gen = n;
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p()
+ if (gc_heap::background_running_p()
#ifdef BGC_SERVO_TUNING
|| bgc_tuning::fl_tuning_triggered
|| (bgc_tuning::enable_fl_tuning && bgc_tuning::use_stepping_trigger_p)
void gc_heap::concurrent_print_time_delta (const char* msg)
{
- UNREFERENCED_PARAMETER(msg);
#ifdef TRACE_GC
size_t current_time = GetHighPrecisionTimeStamp();
size_t elapsed_time = current_time - time_bgc_last;
time_bgc_last = current_time;
dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
+#else
+ UNREFERENCED_PARAMETER(msg);
#endif //TRACE_GC
}
void gc_heap::free_list_info (int gen_num, const char* msg)
{
- UNREFERENCED_PARAMETER(gen_num);
- UNREFERENCED_PARAMETER(msg);
#if defined (BACKGROUND_GC) && defined (TRACE_GC)
dprintf (3, ("h%d: %s", heap_number, msg));
for (int i = 0; i < total_generation_count; i++)
generation_free_obj_space (gen)));
}
}
+#else
+ UNREFERENCED_PARAMETER(gen_num);
+ UNREFERENCED_PARAMETER(msg);
#endif // BACKGROUND_GC && TRACE_GC
}
concurrent_print_time_delta ("BGC");
-//#ifdef WRITE_WATCH
- //reset_write_watch (FALSE);
-//#endif //WRITE_WATCH
-
concurrent_print_time_delta ("RW");
background_mark_phase();
free_list_info (max_generation, "after mark phase");
}
#endif // BACKGROUND_GC
-#ifdef GC_STATS
- if (GCStatistics::Enabled() && heap_number == 0)
- g_GCStatistics.AddGCStats(settings,
- dd_gc_elapsed_time(dynamic_data_of(settings.condemned_generation)));
-#endif // GC_STATS
-
#ifdef BACKGROUND_GC
assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC
bgc_threads_sync_event.Wait(INFINITE, FALSE);
dprintf (2, ("bgc_threads_sync_event is signalled"));
}
-#else
+#else //MULTIPLE_HEAPS
suspend_EE();
#endif //MULTIPLE_HEAPS
bgc_threads_sync_event.Wait(INFINITE, FALSE);
dprintf (2, ("bgc_threads_sync_event is signalled"));
}
-#else
+#else //MULTIPLE_HEAPS
restart_EE();
#endif //MULTIPLE_HEAPS
}
#endif //BACKGROUND_GC
}
-#endif // defined(VERIFY_HEAP) || (defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
+#endif //VERIFY_HEAP || (FEATURE_EVENT_TRACE && BACKGROUND_GC)
#ifdef MULTIPLE_HEAPS
if (!settings.concurrent)
heap_select::mark_heap (heap_number);
}
-#else
+#else //MULTIPLE_HEAPS
gc_data_global.final_youngest_desired =
dd_desired_allocation (dynamic_data_of (0));
for (int i = condemned_gen_number; i >= 0; i--)
{
size_t gen_start_size = Align (min_obj_size);
- make_generation (i, ephemeral_heap_segment, start, 0);
+ make_generation (i, ephemeral_heap_segment, start);
generation* gen = generation_of (i);
generation_plan_allocation_start (gen) = start;
#ifdef MULTIPLE_HEAPS
gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
if (gc_t_join.joined())
- {
#endif //MULTIPLE_HEAPS
-
+ {
check_and_set_no_gc_oom();
#ifdef MULTIPLE_HEAPS
gc_t_join.restart();
- }
#endif //MULTIPLE_HEAPS
+ }
}
if ((current_no_gc_region_info.start_status == start_no_gc_success) &&
#ifdef MULTIPLE_HEAPS
gc_t_join.join(this, gc_join_final_no_gc);
if (gc_t_join.joined())
- {
#endif //MULTIPLE_HEAPS
-
+ {
check_and_set_no_gc_oom();
if (current_no_gc_region_info.start_status == start_no_gc_success)
#ifdef MULTIPLE_HEAPS
gc_t_join.restart();
- }
#endif //MULTIPLE_HEAPS
+ }
}
void gc_heap::init_records()
#ifdef MULTIPLE_HEAPS
gc_t_join.join(this, gc_join_minimal_gc);
if (gc_t_join.joined())
- {
#endif //MULTIPLE_HEAPS
-
+ {
#ifdef MULTIPLE_HEAPS
// this is serialized because we need to get a segment
for (int i = 0; i < n_heaps; i++)
#ifdef MULTIPLE_HEAPS
gc_t_join.restart();
- }
#endif //MULTIPLE_HEAPS
+ }
goto done;
}
//align all heaps on the max generation to condemn
dprintf (3, ("Joining for max generation to condemn"));
condemned_generation_num = generation_to_condemn (n,
- &blocking_collection,
- &elevation_requested,
- FALSE);
+ &blocking_collection,
+ &elevation_requested,
+ FALSE);
gc_t_join.join(this, gc_join_generation_determined);
if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
{
-#if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
- //delete old slots from the segment table
+#ifdef FEATURE_BASICFREEZE
seg_table->delete_old_slots();
-#endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < n_heaps; i++)
hp->rearrange_uoh_segments();
#ifdef BACKGROUND_GC
hp->background_delay_delete_uoh_segments();
- if (!recursive_gc_sync::background_running_p())
+ if (!gc_heap::background_running_p())
hp->rearrange_small_heap_segments();
#endif //BACKGROUND_GC
}
rearrange_uoh_segments();
#ifdef BACKGROUND_GC
background_delay_delete_uoh_segments();
- if (!recursive_gc_sync::background_running_p())
+ if (!gc_heap::background_running_p())
rearrange_small_heap_segments();
#endif //BACKGROUND_GC
#endif //MULTIPLE_HEAPS
#ifdef BACKGROUND_GC
if ((settings.condemned_generation == max_generation) &&
- (recursive_gc_sync::background_running_p()))
+ (gc_heap::background_running_p()))
{
//TODO BACKGROUND_GC If we just wait for the end of gc, it won't work
// because we have to collect 0 and 1 properly
#endif //MULTIPLE_HEAPS
// Call the EE for start of GC work
- // just one thread for MP GC
GCToEEInterface::GcStartWork (settings.condemned_generation,
max_generation);
#ifdef MULTIPLE_HEAPS
gc_start_event.Reset();
- //start all threads on the roots.
dprintf(3, ("Starting all gc threads for gc"));
gc_t_join.restart();
#endif //MULTIPLE_HEAPS
for (int i = 0; i < n_heaps; i++)
{
gc_heap* hp = g_heaps[i];
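+ // a concurrent GC requires every heap to have its BGC thread and a committed mark array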
- if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init (hp->mark_array))
+ if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init())
{
do_concurrent_p = FALSE;
break;
}
}
#else
- do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init (mark_array));
+ do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init());
if (do_concurrent_p)
{
background_saved_lowest_address = lowest_address;
int gen = check_for_ephemeral_alloc();
// always do a gen1 GC before we start BGC.
- // This is temporary for testing purpose.
- //int gen = max_generation - 1;
dont_restart_ee_p = TRUE;
if (gen == -1)
{
#endif //MULTIPLE_HEAPS
}
-#ifdef INTERIOR_POINTERS
heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
{
-#ifdef SEG_MAPPING_TABLE
heap_segment* seg = seg_mapping_table_segment_of (interior);
if (seg)
{
return 0;
}
return seg;
-#else //SEG_MAPPING_TABLE
-#ifdef MULTIPLE_HEAPS
- for (int i = 0; i < gc_heap::n_heaps; i++)
- {
- gc_heap* h = gc_heap::g_heaps [i];
- hs = h->find_segment_per_heap (o, small_segment_only_p);
- if (hs)
- {
- break;
- }
- }
-#else
- {
- gc_heap* h = pGenGCHeap;
- hs = h->find_segment_per_heap (o, small_segment_only_p);
- }
-#endif //MULTIPLE_HEAPS
-#endif //SEG_MAPPING_TABLE
-}
-
-heap_segment* gc_heap::find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p)
-{
-#ifdef SEG_MAPPING_TABLE
- return find_segment (interior, small_segment_only_p);
-#else //SEG_MAPPING_TABLE
- if (in_range_for_segment (interior, ephemeral_heap_segment))
- {
- return ephemeral_heap_segment;
- }
- else
- {
- heap_segment* found_seg = 0;
-
- {
- heap_segment* seg = generation_start_segment (generation_of (max_generation));
- do
- {
- if (in_range_for_segment (interior, seg))
- {
- found_seg = seg;
- goto end_find_segment;
- }
-
- } while ((seg = heap_segment_next (seg)) != 0);
- }
- if (!small_segment_only_p)
- {
-#ifdef BACKGROUND_GC
- {
- ptrdiff_t delta = 0;
- heap_segment* seg = segment_of (interior, delta);
- if (seg && in_range_for_segment (interior, seg))
- {
- found_seg = seg;
- }
- goto end_find_segment;
- }
-#else //BACKGROUND_GC
- for (int i = uoh_start_generation; i < total_generation_count; i++)
- {
- heap_segment* seg = generation_start_segment (generation_of (i));
- do
- {
- if (in_range_for_segment(interior, seg))
- {
- found_seg = seg;
- goto end_find_segment;
- }
-
- } while ((seg = heap_segment_next (seg)) != 0);
- }
-#endif //BACKGROUND_GC
- }
-end_find_segment:
-
- return found_seg;
- }
-#endif //SEG_MAPPING_TABLE
}
-#endif //INTERIOR_POINTERS
#if !defined(_DEBUG) && !defined(__GNUC__)
inline // This causes link errors if global optimization is off
#ifdef MULTIPLE_HEAPS
if (o == 0)
return g_heaps [0];
-#ifdef SEG_MAPPING_TABLE
gc_heap* hp = seg_mapping_table_heap_of (o);
return (hp ? hp : g_heaps[0]);
-#else //SEG_MAPPING_TABLE
- ptrdiff_t delta = 0;
- heap_segment* seg = segment_of (o, delta);
- return (seg ? heap_segment_heap (seg) : g_heaps [0]);
-#endif //SEG_MAPPING_TABLE
#else //MULTIPLE_HEAPS
UNREFERENCED_PARAMETER(o);
return __this;
#ifdef MULTIPLE_HEAPS
if (o == 0)
return g_heaps [0];
-#ifdef SEG_MAPPING_TABLE
gc_heap* hp = seg_mapping_table_heap_of_gc (o);
return (hp ? hp : g_heaps[0]);
-#else //SEG_MAPPING_TABLE
- ptrdiff_t delta = 0;
- heap_segment* seg = segment_of (o, delta);
- return (seg ? heap_segment_heap (seg) : g_heaps [0]);
-#endif //SEG_MAPPING_TABLE
#else //MULTIPLE_HEAPS
UNREFERENCED_PARAMETER(o);
return __this;
#endif //MULTIPLE_HEAPS
}
-#ifdef INTERIOR_POINTERS
// will find all heap objects (large and small)
uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
{
int brick_entry = get_brick_entry(brick_of (interior));
if (brick_entry == 0)
{
- // this is a pointer to a large object
- heap_segment* seg = find_segment_per_heap (interior, FALSE);
+ // this is a pointer to a UOH object
+ heap_segment* seg = find_segment (interior, FALSE);
if (seg
#ifdef FEATURE_CONSERVATIVE_GC
&& (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
|| (GCConfig::GetConservativeGC() && !heap_segment_uoh_p (seg))
#endif
);
- //int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
assert (interior < heap_segment_allocated (seg));
uint8_t* o = heap_segment_mem (seg);
}
else if (interior >= low)
{
- heap_segment* seg = find_segment_per_heap (interior, TRUE);
+ heap_segment* seg = find_segment (interior, TRUE);
if (seg)
{
#ifdef FEATURE_CONSERVATIVE_GC
return 0;
}
-uint8_t*
-gc_heap::find_object_for_relocation (uint8_t* interior, uint8_t* low, uint8_t* high)
-{
- uint8_t* old_address = interior;
- if (!((old_address >= low) && (old_address < high)))
- return 0;
- uint8_t* plug = 0;
- size_t brick = brick_of (old_address);
- int brick_entry = brick_table [ brick ];
- if (brick_entry != 0)
- {
- retry:
- {
- while (brick_entry < 0)
- {
- brick = (brick + brick_entry);
- brick_entry = brick_table [ brick ];
- }
- uint8_t* old_loc = old_address;
- uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
- old_loc);
- if (node <= old_loc)
- plug = node;
- else
- {
- brick = brick - 1;
- brick_entry = brick_table [ brick ];
- goto retry;
- }
-
- }
- assert (plug);
- //find the object by going along the plug
- uint8_t* o = plug;
- while (o <= interior)
- {
- uint8_t* next_o = o + Align (size (o));
- assert (next_o > o);
- if (next_o > interior)
- {
- break;
- }
- o = next_o;
- }
- assert ((o <= interior) && ((o + Align (size (o))) > interior));
- return o;
- }
- else
- {
- // this is a pointer to a large object
- heap_segment* seg = find_segment_per_heap (interior, FALSE);
- if (seg)
- {
- assert (interior < heap_segment_allocated (seg));
-
- uint8_t* o = heap_segment_mem (seg);
- while (o < heap_segment_allocated (seg))
- {
- uint8_t* next_o = o + Align (size (o));
- assert (next_o > o);
- if ((o < interior) && (interior < next_o))
- return o;
- o = next_o;
- }
- return 0;
- }
- else
- {
- return 0;
- }
- }
-}
-#else //INTERIOR_POINTERS
-inline
-uint8_t* gc_heap::find_object (uint8_t* o, uint8_t* low)
-{
- return o;
-}
-#endif //INTERIOR_POINTERS
-
#ifdef MULTIPLE_HEAPS
#ifdef MARK_LIST
#ifdef MULTIPLE_HEAPS
else if (o)
{
- //find the heap
gc_heap* hp = heap_of_gc (o);
assert (hp);
if ((o >= hp->gc_low) && (o < hp->gc_high))
#ifdef MULTIPLE_HEAPS
else if (o)
{
- //find the heap
gc_heap* hp = heap_of (o);
assert (hp);
if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address))
void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
{
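+ // outside of debug builds, last_pinned_plug is only read by the assert below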
+#ifndef _DEBUG
UNREFERENCED_PARAMETER(last_pinned_plug);
+#endif //!_DEBUG
mark& m = mark_stack_array[mark_stack_tos - 1];
assert (last_pinned_plug == m.first);
}
inline
-uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
+void gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
{
if ((o >= gc_low) && (o < gc_high))
mark_object_simple (&o THREAD_NUMBER_ARG);
mark_object_simple (&o THREAD_NUMBER_ARG);
}
#endif //MULTIPLE_HEAPS
-
- return o;
}
#ifdef BACKGROUND_GC
return o;
}
-void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
-{
- UNREFERENCED_PARAMETER(sc);
-
- assert (settings.concurrent);
- uint8_t* o = (uint8_t*)object;
-
- gc_heap* hp = gc_heap::heap_of (o);
-#ifdef INTERIOR_POINTERS
- if (flags & GC_CALL_INTERIOR)
- {
- o = hp->find_object (o, background_saved_lowest_address);
- }
-#endif //INTERIOR_POINTERS
-
- if (!background_object_marked (o, FALSE))
- {
- FATAL_GC_ERROR();
- }
-}
-
void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags)
{
UNREFERENCED_PARAMETER(sc);
return;
}
-#ifdef INTERIOR_POINTERS
if (flags & GC_CALL_INTERIOR)
{
o = hp->find_object (o, hp->background_saved_lowest_address);
if (o == 0)
return;
}
-#endif //INTERIOR_POINTERS
#ifdef FEATURE_CONSERVATIVE_GC
// For conservative GC, a value on stack may point to middle of a free object.
}
}
- // Restart all the workers.
dprintf(3, ("Starting all gc thread mark stack overflow processing"));
gc_t_join.restart();
}
gc_t_join.join(this, gc_join_rescan_dependent_handles);
if (gc_t_join.joined())
{
- // Restart all the workers.
dprintf(3, ("Starting all gc thread for dependent handle promotion"));
gc_t_join.restart();
}
#ifdef MULTIPLE_HEAPS
gc_t_join.join(this, gc_join_begin_mark_phase);
if (gc_t_join.joined())
- {
#endif //MULTIPLE_HEAPS
-
+ {
maxgen_size_inc_p = false;
num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
#endif //MH_SC_MARK
gc_t_join.restart();
- }
#endif //MULTIPLE_HEAPS
+ }
{
-
#ifdef MARK_LIST
//set up the mark lists from g_mark_list
assert (g_mark_list);
slow = MAX_PTR;
#endif //MULTIPLE_HEAPS
- //%type% category = quote (mark);
-
if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
{
GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
last_promoted_bytes = promoted_bytes (heap_number);
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
scan_background_roots (GCHeap::Promote, heap_number, &sc);
}
#ifdef TRACE_GC
size_t promoted_before_cards = promoted_bytes (heap_number);
+ dprintf (3, ("before cards: %Id", promoted_before_cards));
#endif //TRACE_GC
- dprintf (3, ("before cards: %Id", promoted_before_cards));
if (!full_p)
{
#ifdef CARD_BUNDLE
gc_t_join.r_init();
}
- //start all threads on the roots.
dprintf(3, ("Starting all gc thread for short weak handle scan"));
gc_t_join.restart();
#endif //MULTIPLE_HEAPS
-
}
#ifdef FEATURE_CARD_MARKING_STEALING
dprintf(3, ("Joining for finalization"));
gc_t_join.join(this, gc_join_scan_finalization);
if (gc_t_join.joined())
-#endif //MULTIPLE_HEAPS
-
{
-#ifdef MULTIPLE_HEAPS
- //start all threads on the roots.
dprintf(3, ("Starting all gc thread for Finalization"));
gc_t_join.restart();
-#endif //MULTIPLE_HEAPS
}
+#endif //MULTIPLE_HEAPS
//Handle finalization.
size_t promoted_bytes_live = promoted_bytes (heap_number);
gc_t_join.join(this, gc_join_null_dead_long_weak);
if (gc_t_join.joined())
{
- //start all threads on the roots.
dprintf(3, ("Starting all gc thread for weak pointer deletion"));
gc_t_join.restart();
}
GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
#ifdef MULTIPLE_HEAPS
-
-#ifdef MARK_LIST
-#ifndef PARALLEL_MARK_LIST_SORT
+#if defined(MARK_LIST) && !defined(PARALLEL_MARK_LIST_SORT)
//compact g_mark_list and sort it.
combine_mark_lists();
-#endif //PARALLEL_MARK_LIST_SORT
-#endif //MARK_LIST
+#endif //MARK_LIST && !PARALLEL_MARK_LIST_SORT
//decide on promotion
if (!settings.promotion)
}
#endif //SNOOP_STATS
- //start all threads.
dprintf(3, ("Starting all threads for end of mark phase"));
gc_t_join.restart();
#else //MULTIPLE_HEAPS
settings.promotion = TRUE;
}
}
-
#endif //MULTIPLE_HEAPS
}
ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
//clear the left bit and the relocation field
*place &= 1;
- // store the value
*place |= val;
}
void gc_heap::sweep_ro_segments (heap_segment* start_seg)
{
//go through all of the segment in range and reset the mark bit
- //TODO works only on small object segments
-
heap_segment* seg = start_seg;
while (seg)
seg_clear_mark_bits (seg);
}
#else //BACKGROUND_GC
-
-#ifdef MARK_ARRAY
- if(gc_can_use_concurrent)
- {
- clear_mark_array (max (heap_segment_mem (seg), lowest_address),
- min (heap_segment_allocated (seg), highest_address),
- FALSE); // read_only segments need the mark clear
- }
-#else //MARK_ARRAY
seg_clear_mark_bits (seg);
-#endif //MARK_ARRAY
-
#endif //BACKGROUND_GC
}
seg = heap_segment_next (seg);
return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) + size) <= alloc_limit);
}
-uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
+uint8_t* gc_heap::loh_allocate_in_condemned (size_t size)
{
- UNREFERENCED_PARAMETER(old_loc);
-
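+ // carve out space for a relocated large object in the condemned LOH generation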
generation* gen = large_object_generation;
dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id",
generation_allocation_pointer (gen),
}
else
{
- new_address = loh_allocate_in_condemned (o, size);
+ new_address = loh_allocate_in_condemned (size);
}
loh_set_node_relocation_distance (o, (new_address - o));
//Skip the generation gap object
o = o + AlignQword (size (o));
- relocate_args args;
- args.low = gc_low;
- args.high = gc_high;
- args.last_plug = 0;
-
while (1)
{
if (o >= heap_segment_allocated (seg))
assert (settings.concurrent == FALSE);
- // %type% category = quote (plan);
-
dprintf (2,("---- Plan Phase ---- Condemned generation %d, promotion: %d",
condemned_gen_number, settings.promotion ? 1 : 0));
if ((condemned_gen_number < max_generation) &&
(mark_list_index <= mark_list_end)
#ifdef BACKGROUND_GC
- && (!recursive_gc_sync::background_running_p())
+ && (!gc_heap::background_running_p())
#endif //BACKGROUND_GC
)
{
#ifdef BACKGROUND_GC
if (current_c_gc_state == c_gc_state_marking)
{
- assert (recursive_gc_sync::background_running_p());
+ assert (gc_heap::background_running_p());
while ((xl < end) && !marked (xl))
{
dprintf (4, ("-%Ix-", (size_t)xl));
}
}
- //start all threads on the roots.
dprintf(3, ("Starting all gc threads after compaction decision"));
gc_t_join.restart();
}
- //reset the local variable accordingly
should_compact = (gc_policy >= policy_compact);
should_expand = (gc_policy >= policy_expand);
dprintf(3, ("Joining after end of compaction"));
gc_t_join.join(this, gc_join_adjust_handle_age_compact);
if (gc_t_join.joined())
-#endif //MULTIPLE_HEAPS
{
-#ifdef MULTIPLE_HEAPS
//join all threads to make sure they are synchronized
dprintf(3, ("Restarting after Promotion granted"));
gc_t_join.restart();
-#endif //MULTIPLE_HEAPS
}
+#endif //MULTIPLE_HEAPS
ScanContext sc;
sc.thread_number = heap_number;
if (settings.promotion && !settings.demotion)
{
+#ifdef _DEBUG
uint8_t* start = generation_allocation_start (youngest_generation);
- MAYBE_UNUSED_VAR(start);
assert (heap_segment_allocated (ephemeral_heap_segment) ==
(start + Align (size (start))));
+#endif //_DEBUG
}
}
else
void gc_heap::fix_generation_bounds (int condemned_gen_number,
generation* consing_gen)
{
+#ifndef _DEBUG
UNREFERENCED_PARAMETER(consing_gen);
+#endif //!_DEBUG
assert (generation_allocation_segment (consing_gen) ==
ephemeral_heap_segment);
// new ephemeral generations will be empty and there'll be no need to set cards for the
// old ephemeral generations that got promoted into max_generation.
- ptrdiff_t delta = 0;
-#ifdef SEG_MAPPING_TABLE
heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]);
-#else //SEG_MAPPING_TABLE
- heap_segment* old_ephemeral_seg = segment_of (saved_ephemeral_plan_start[max_generation-1], delta);
-#endif //SEG_MAPPING_TABLE
assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg));
size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg)));
{
alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment);
//reset the allocated size
+#ifdef _DEBUG
uint8_t* start = generation_allocation_start (youngest_generation);
- MAYBE_UNUSED_VAR(start);
if (settings.promotion && !settings.demotion)
{
assert ((start + Align (size (start))) ==
heap_segment_plan_allocated(ephemeral_heap_segment));
}
-
+#endif //_DEBUG
heap_segment_allocated(ephemeral_heap_segment)=
heap_segment_plan_allocated(ephemeral_heap_segment);
}
{
//We had an empty segment
//need to allocate the generation start
-
generation* gen = generation_of (max_generation);
heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
#ifdef FEATURE_BASICFREEZE
bool gc_heap::frozen_object_p (Object* obj)
{
-#ifdef MULTIPLE_HEAPS
-#ifdef SEG_MAPPING_TABLE
- heap_segment* pSegment = seg_mapping_table_segment_of((uint8_t*)obj);
-#else
- ptrdiff_t delta = 0;
- heap_segment* pSegment = segment_of ((uint8_t*)obj, delta);
-#endif
-#else //MULTIPLE_HEAPS
- heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)obj, FALSE);
- _ASSERTE(pSegment);
-#endif //MULTIPLE_HEAPS
-
- return heap_segment_read_only_p(pSegment);
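+ // frozen segments are registered in the segment mapping table, so a direct lookup works in all configurations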
+ heap_segment* seg = seg_mapping_table_segment_of ((uint8_t*)obj);
+ return heap_segment_read_only_p (seg);
}
#endif // FEATURE_BASICFREEZE
-#ifdef FEATURE_REDHAWK
-// TODO: this was added on RH, we have not done perf runs to see if this is the right
-// thing to do for other versions of the CLR.
-inline
-#endif // FEATURE_REDHAWK
void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
{
uint8_t* old_address = *pold_address;
#ifdef FEATURE_LOH_COMPACTION
if (loh_compacted_p)
{
-#ifdef MULTIPLE_HEAPS
-#ifdef SEG_MAPPING_TABLE
heap_segment* pSegment = seg_mapping_table_segment_of ((uint8_t*)old_address);
-#else
- ptrdiff_t delta = 0;
- heap_segment* pSegment = segment_of ((uint8_t*)old_address, delta);
-#endif
-#else //MULTIPLE_HEAPS
- heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)old_address, FALSE);
- _ASSERTE(pSegment);
-#endif //MULTIPLE_HEAPS
-
size_t flags = pSegment->flags;
if ((flags & heap_segment_flags_loh)
#ifdef FEATURE_BASICFREEZE
// if we expanded, right now we are not handling it as We are not saving the new reloc info.
void gc_heap::verify_pins_with_post_plug_info (const char* msg)
{
- UNREFERENCED_PARAMETER(msg);
#if defined (_DEBUG) && defined (VERIFY_HEAP)
if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
{
if (!verify_pinned_queue_p)
dprintf (3, ("%s verified", msg));
}
+#else
+ UNREFERENCED_PARAMETER(msg);
#endif // _DEBUG && VERIFY_HEAP
}
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
{
- // This should only be called for BGCs
assert(settings.concurrent);
for (int i = max_generation; i < total_generation_count; i++)
}
}
}
-#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
void gc_heap::relocate_phase (int condemned_gen_number,
uint8_t* first_condemned_address)
sc.promotion = FALSE;
sc.concurrent = FALSE;
-// %type% category = quote (relocate);
- dprintf (2,("---- Relocate phase -----"));
-
#ifdef MULTIPLE_HEAPS
//join all threads to make sure they are synchronized
dprintf(3, ("Joining after end of plan"));
gc_t_join.join(this, gc_join_begin_relocate_phase);
if (gc_t_join.joined())
-#endif //MULTIPLE_HEAPS
-
{
-#ifdef MULTIPLE_HEAPS
//join all threads to make sure they are synchronized
dprintf(3, ("Restarting for relocation"));
gc_t_join.restart();
-#endif //MULTIPLE_HEAPS
}
+#endif //MULTIPLE_HEAPS
+
+ dprintf (2,("---- Relocate phase -----"));
dprintf(3,("Relocating roots"));
GCScan::GcScanRoots(GCHeap::Relocate,
verify_pins_with_post_plug_info("after reloc stack");
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
scan_background_roots (GCHeap::Relocate, heap_number, &sc);
}
}
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
-#ifdef MULTIPLE_HEAPS
- //join all threads to make sure they are synchronized
- dprintf(3, ("Joining after end of relocation"));
- gc_t_join.join(this, gc_join_relocate_phase_done);
-#endif //MULTIPLE_HEAPS
-
dprintf(2,( "---- End of Relocate phase ----"));
}
uint8_t* first_condemned_address,
BOOL clear_cards)
{
-// %type% category = quote (compact);
+#ifdef MULTIPLE_HEAPS
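+ // all heaps must finish relocating before any of them starts compacting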
+ dprintf(3, ("Joining after end of relocation"));
+ gc_t_join.join(this, gc_join_relocate_phase_done);
+ if (gc_t_join.joined())
+ {
+ dprintf(3, ("Restarting for compaction"));
+ gc_t_join.restart();
+ }
+#endif //MULTIPLE_HEAPS
+
generation* condemned_gen = generation_of (condemned_gen_number);
uint8_t* start_address = first_condemned_address;
size_t current_brick = brick_of (start_address);
heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
-
PREFIX_ASSUME(current_heap_segment != NULL);
reset_pinned_queue_bos();
dprintf (2,("---- Compact Phase: %Ix(%Ix)----",
first_condemned_address, brick_of (first_condemned_address)));
-#ifdef MULTIPLE_HEAPS
- //restart
- if (gc_t_join.joined())
- {
-#endif //MULTIPLE_HEAPS
-
-#ifdef MULTIPLE_HEAPS
- dprintf(3, ("Restarting for compaction"));
- gc_t_join.restart();
- }
-#endif //MULTIPLE_HEAPS
-
- reset_pinned_queue_bos();
-
#ifdef FEATURE_LOH_COMPACTION
if (loh_compacted_p)
{
#pragma warning(pop)
#endif //_MSC_VER
-#endif //BACKGROUND_GC
-
-/*------------------ Background GC ----------------------------*/
-
-#ifdef BACKGROUND_GC
-
void gc_heap::background_drain_mark_list (int thread)
{
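+ // the thread index is only referenced in MULTIPLE_HEAPS builds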
+#ifndef MULTIPLE_HEAPS
UNREFERENCED_PARAMETER(thread);
+#endif //!MULTIPLE_HEAPS
size_t saved_c_mark_list_index = c_mark_list_index;
}
}
- // Restart all the workers.
dprintf(2, ("Starting all gc thread mark stack overflow processing"));
bgc_t_join.restart();
}
bgc_t_join.join(this, gc_join_rescan_dependent_handles);
if (bgc_t_join.joined())
{
- // Restart all the workers.
dprintf(3, ("Starting all gc thread for dependent handle promotion"));
bgc_t_join.restart();
}
void gc_heap::recover_bgc_settings()
{
- if ((settings.condemned_generation < max_generation) && recursive_gc_sync::background_running_p())
+ if ((settings.condemned_generation < max_generation) && gc_heap::background_running_p())
{
dprintf (2, ("restoring bgc settings"));
settings = saved_bgc_settings;
BOOL gc_heap::should_commit_mark_array()
{
- return (recursive_gc_sync::background_running_p() || (current_bgc_state == bgc_initialized));
+ return (gc_heap::background_running_p() || (current_bgc_state == bgc_initialized));
}
void gc_heap::clear_commit_flag()
#endif //_DEBUG
}
-void gc_heap::verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr)
-{
- verify_mark_array_cleared (heap_segment_mem (seg), heap_segment_reserved (seg), mark_array_addr);
-}
-
BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
heap_segment* seg,
uint32_t* new_card_table,
uint8_t* new_lowest_address)
{
- UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced
-
uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
uint8_t* end = heap_segment_reserved (seg);
return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr);
}
-BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
+BOOL gc_heap::commit_mark_array_bgc_init()
{
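+ // commit mark array space covering the whole heap range before the BGC starts marking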
- UNREFERENCED_PARAMETER(mark_array_addr);
-
dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix",
lowest_address, highest_address, mark_array));
// Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset
// can be done while the runtime is suspended or after the runtime is restarted, the preference was to reset while
// the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below.
-#ifdef WRITE_WATCH
concurrent_print_time_delta ("CRWW begin");
#ifdef MULTIPLE_HEAPS
#endif //MULTIPLE_HEAPS
concurrent_print_time_delta ("CRWW");
-#endif //WRITE_WATCH
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
dprintf (GTC_LOG, ("setting cm_in_progress"));
c_write (cm_in_progress, TRUE);
- //restart all thread, doing the marking from the array
assert (dont_restart_ee_p);
dont_restart_ee_p = FALSE;
#ifdef MULTIPLE_HEAPS
bgc_t_join.join(this, gc_join_after_absorb);
if (bgc_t_join.joined())
+#endif //MULTIPLE_HEAPS
{
#ifdef BGC_SERVO_TUNING
bgc_tuning::record_bgc_sweep_start();
#endif //BGC_SERVO_TUNING
+#ifdef MULTIPLE_HEAPS
dprintf(3, ("Joining BGC threads after absorb"));
bgc_t_join.restart();
- }
#endif //MULTIPLE_HEAPS
+ }
// give VM a chance to do work
GCToEEInterface::GcBeforeBGCSweepWork();
//concurrent_print_time_delta ("nonconcurrent marking stack roots");
concurrent_print_time_delta ("NRS");
-// finalize_queue->EnterFinalizeLock();
finalize_queue->GcScanRoots(background_promote, heap_number, 0);
-// finalize_queue->LeaveFinalizeLock();
dprintf (2, ("nonconcurrent marking handle table"));
GCScan::GcScanHandles(background_promote,
}
#endif //MULTIPLE_HEAPS
- //Handle finalization.
dprintf(3,("Marking finalization data"));
//concurrent_print_time_delta ("bgc joined to mark finalization");
concurrent_print_time_delta ("NRj");
if (heap_segment_allocated (seg) == heap_segment_mem (seg))
{
- // This can't happen...
FATAL_GC_ERROR();
}
void gc_heap::revisit_written_page (uint8_t* page,
uint8_t* end,
BOOL concurrent_p,
- heap_segment* seg,
uint8_t*& last_page,
uint8_t*& last_object,
BOOL large_objects_p,
size_t& num_marked_objects)
{
- UNREFERENCED_PARAMETER(seg);
-
uint8_t* start_address = page;
uint8_t* o = 0;
int align_const = get_alignment_constant (!large_objects_p);
// allocated out of range all live.
void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
{
-#ifdef WRITE_WATCH
if (concurrent_p && !reset_only_p)
{
current_bgc_state = bgc_revisit_soh;
{
//search for marked objects in the page
revisit_written_page (page, high_address, concurrent_p,
- seg, last_page, last_object,
- !small_object_segments,
- total_marked_objects);
+ last_page, last_object,
+ !small_object_segments,
+ total_marked_objects);
}
else
{
if (!reset_only_p)
{
dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
- fire_revisit_event (total_dirtied_pages, total_marked_objects, /* large_objects_p */ FALSE);
+ fire_revisit_event (total_dirtied_pages, total_marked_objects, FALSE);
concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH");
total_dirtied_pages = 0;
total_marked_objects = 0;
}
- //switch to large segment
if (concurrent_p && !reset_only_p)
{
current_bgc_state = bgc_revisit_uoh;
}
small_object_segments = FALSE;
- //concurrent_print_time_delta (concurrent_p ? "concurrent marking dirtied pages on SOH" : "nonconcurrent marking dirtied pages on SOH");
-
dprintf (3, ("now revisiting large object segments"));
}
else
else
{
dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
- fire_revisit_event (total_dirtied_pages, total_marked_objects, /* large_objects_p */ TRUE);
+ fire_revisit_event (total_dirtied_pages, total_marked_objects, TRUE);
}
}
}
-
-#endif //WRITE_WATCH
}
void gc_heap::background_grow_c_mark_list()
return;
}
-#ifdef INTERIOR_POINTERS
if (flags & GC_CALL_INTERIOR)
{
o = hp->find_object (o, hp->background_saved_lowest_address);
if (o == 0)
return;
}
-#endif //INTERIOR_POINTERS
#ifdef FEATURE_CONSERVATIVE_GC
// For conservative GC, a value on stack may point to middle of a free object.
bgc_start_event.CloseEvent();
bgc_threads_timeout_cs.Destroy();
bgc_thread = 0;
- recursive_gc_sync::shutdown();
}
void gc_heap::bgc_thread_function()
dprintf (3, ("no concurrent GC needed, exiting"));
break;
}
- recursive_gc_sync::begin_background();
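+ // flag the BGC as in progress so background_running_p() reflects it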
+ gc_background_running = TRUE;
dprintf (2, ("beginning of bgc: gen2 FL: %d, FO: %d, frag: %d",
generation_free_list_space (generation_of (max_generation)),
generation_free_obj_space (generation_of (max_generation)),
#endif //MULTIPLE_HEAPS
c_write (settings.concurrent, FALSE);
- recursive_gc_sync::end_background();
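+ // the BGC is complete; clear the flag before signalling background_gc_done_event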
+ gc_background_running = FALSE;
keep_bgc_threads_p = FALSE;
background_gc_done_event.Set();
gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
- if (!(recursive_gc_sync::background_running_p()))
+ if (!(gc_heap::background_running_p()))
{
size_t current_alloc = get_total_servo_alloc (loh_generation);
tuning_calculation* current_gen_calc = &gen_calc[loh_generation - max_generation];
bool gc_heap::bgc_tuning::should_trigger_bgc()
{
- if (!bgc_tuning::enable_fl_tuning || recursive_gc_sync::background_running_p())
+ if (!bgc_tuning::enable_fl_tuning || gc_heap::background_running_p())
{
return false;
}
size_t curr_cl = (size_t)next_o / brick_size;
size_t min_cl = (size_t)first_object / brick_size;
- //dprintf (3,( "Looking for intersection with %Ix from %Ix", (size_t)start, (size_t)o));
#ifdef TRACE_GC
unsigned int n_o = 1;
#endif //TRACE_GC
}
uint8_t*
-gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
+gc_heap::compute_next_boundary (int gen_number,
BOOL relocating)
{
- UNREFERENCED_PARAMETER(low);
-
//when relocating, the fault line is the plan start of the younger
//generation because the generation is promoted.
if (relocating && (gen_number == (settings.condemned_generation + 1)))
gc_heap::keep_card_live (uint8_t* o, size_t& n_gen,
size_t& cg_pointers_found)
{
- THREAD_FROM_HEAP;
if ((gc_low <= o) && (gc_high > o))
{
n_gen++;
generation* oldest_gen = generation_of (max_generation);
int curr_gen_number = max_generation;
uint8_t* gen_boundary = generation_allocation_start(generation_of(curr_gen_number - 1));
- uint8_t* next_boundary = compute_next_boundary(gc_low, curr_gen_number, relocating);
+ uint8_t* next_boundary = compute_next_boundary(curr_gen_number, relocating);
heap_segment* seg = heap_segment_rw (generation_start_segment (oldest_gen));
PREFIX_ASSUME(seg != NULL);
gen_boundary = generation_allocation_start
(generation_of (curr_gen_number - 1));
next_boundary = (compute_next_boundary
- (low, curr_gen_number, relocating));
+ (curr_gen_number, relocating));
}
dprintf (4, ("|%Ix|", (size_t)o));
}
BOOL gc_heap::process_free_space (heap_segment* seg,
- size_t free_space,
- size_t min_free_size,
- size_t min_cont_size,
- size_t* total_free_space,
- size_t* largest_free_space)
+ size_t free_space,
+ size_t min_free_size,
+ size_t min_cont_size,
+ size_t* total_free_space,
+ size_t* largest_free_space)
{
*total_free_space += free_space;
*largest_free_space = max (*largest_free_space, free_space);
}
//tally up free space
-
while (1)
{
while (free_list)
generation* consing_gen,
heap_segment* new_heap_segment)
{
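+ // condemned_generation is only consumed by the assert below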
+#ifndef _DEBUG
UNREFERENCED_PARAMETER(condemned_generation);
+#endif //!_DEBUG
assert (condemned_generation >= (max_generation -1));
unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
uint8_t* start_address = generation_limit (max_generation);
BOOL should_promote_ephemeral = FALSE;
ptrdiff_t eph_size = total_ephemeral_size;
#ifdef BACKGROUND_GC
- dprintf(2,("%s: ---- Heap Expansion ----", (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")));
+ dprintf(2,("%s: ---- Heap Expansion ----", (gc_heap::background_running_p() ? "FGC" : "NGC")));
#endif //BACKGROUND_GC
settings.heap_expansion = TRUE;
get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
#endif //!MULTIPLE_HEAPS
}
- ptrdiff_t eph_size1 = total_ephemeral_size;
- MAYBE_UNUSED_VAR(eph_size1);
if (!should_promote_ephemeral && new_segment_p)
{
- assert (eph_size1 <= eph_size);
+ assert ((ptrdiff_t)total_ephemeral_size <= eph_size);
}
if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg))
dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
dd_new_allocation (dd) = dd_gc_new_allocation (dd);
- //update counter
dd_promoted_size (dd) = out;
if (gen_number == max_generation)
{
end_poh_size = total_gen_size;
#endif //BACKGROUND_GC
- //update counter
dd_promoted_size (dd) = out;
}
}
fragmentation, (int)(fragmentation_burden * 100.0)));
#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
- // for pure GC stress runs we need compaction, for GC stress "mix"
- // we need to ensure a better mix of compacting and sweeping collections
- if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent
- && !g_pConfig->IsGCStressMix())
+ // for GC stress runs we need compaction
+ if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent)
should_compact = TRUE;
-
-#ifdef GC_STATS
- // in GC stress "mix" mode, for stress induced collections make sure we
- // keep sweeps and compactions relatively balanced. do not (yet) force sweeps
- // against the GC's determination, as it may lead to premature OOMs.
- if (g_pConfig->IsGCStressMix() && settings.stress_induced)
- {
- int compactions = g_GCStatistics.cntCompactFGC+g_GCStatistics.cntCompactNGC;
- int sweeps = g_GCStatistics.cntFGC + g_GCStatistics.cntNGC - compactions;
- if (compactions < sweeps / 10)
- {
- should_compact = TRUE;
- }
- }
-#endif // GC_STATS
#endif //defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
if (GCConfig::GetForceCompact())
#pragma inline_depth(20)
#endif //_MSC_VER
-#ifdef MARK_ARRAY
- uint8_t* current_lowest_address = lowest_address;
- uint8_t* current_highest_address = highest_address;
-#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
- {
- current_lowest_address = background_saved_lowest_address;
- current_highest_address = background_saved_highest_address;
- }
-#endif //BACKGROUND_GC
-#endif // MARK_ARRAY
-
#ifdef FEATURE_LOH_COMPACTION
// The GC allocator made a free object already in this alloc context and
// adjusted the alloc_ptr accordingly.
CObjectHeader* obj = (CObjectHeader*)result;
-#ifdef MARK_ARRAY
- if (recursive_gc_sync::background_running_p())
+#ifdef BACKGROUND_GC
+ if (gc_heap::background_running_p())
{
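+ // the BGC mark array only covers the address range captured when this BGC started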
+ uint8_t* current_lowest_address = background_saved_lowest_address;
+ uint8_t* current_highest_address = background_saved_highest_address;
+
if ((result < current_highest_address) && (result >= current_lowest_address))
{
dprintf (3, ("Clearing mark bit at address %Ix",
mark_array_clear_marked (result);
}
-#ifdef BACKGROUND_GC
if (current_c_gc_state != c_gc_state_free)
{
dprintf (3, ("Concurrent allocation of a large object %Ix",
mark_array_set_marked (result);
}
}
-#endif //BACKGROUND_GC
}
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
assert (obj != 0);
assert ((size_t)obj == Align ((size_t)obj, align_const));
heap_segment_next (next_seg) = prev_seg;
dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg));
- heap_segment_next (seg) = freeable_small_heap_segment;
- freeable_small_heap_segment = seg;
+ heap_segment_next (seg) = freeable_soh_segment;
+ freeable_soh_segment = seg;
}
decommit_heap_segment (seg);
#ifdef MULTIPLE_HEAPS
bgc_t_join.join(this, gc_join_restart_ee);
if (bgc_t_join.joined())
-#endif //MULTIPLE_HEAPS
{
-#ifdef MULTIPLE_HEAPS
dprintf(2, ("Starting BGC threads for resuming EE"));
bgc_t_join.restart();
-#endif //MULTIPLE_HEAPS
}
+#endif //MULTIPLE_HEAPS
if (heap_number == 0)
{
}
}
-void gc_heap::descr_segment (heap_segment* seg )
-{
-#ifdef TRACE_GC
- uint8_t* x = heap_segment_mem (seg);
- while (x < heap_segment_allocated (seg))
- {
- dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
- x = x + Align(size (x));
- }
-#else // TRACE_GC
- UNREFERENCED_PARAMETER(seg);
-#endif // TRACE_GC
-}
-
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
#ifdef MULTIPLE_HEAPS
void gc_heap::descr_generations (BOOL begin_gc_p)
{
+#ifndef TRACE_GC
UNREFERENCED_PARAMETER(begin_gc_p);
+#endif //!TRACE_GC
+
#ifdef STRESS_LOG
if (StressLog::StressLogOn(LF_GC, LL_INFO10))
{
//
//-----------------------------------------------------------------------------
-
-#ifdef TRACE_GC
-
- unsigned int PromotedObjectCount = 0;
- unsigned int CreatedObjectCount = 0;
- unsigned int AllocCount = 0;
- unsigned int AllocBigCount = 0;
- unsigned int AllocSmallCount = 0;
- int64_t AllocStart = 0;
- int64_t AllocDuration = 0;
-#endif //TRACE_GC
-
//Static member variables.
VOLATILE(BOOL) GCHeap::GcInProgress = FALSE;
-//GCTODO
-//CMCSafeLock* GCHeap::fGcLock;
-GCEvent *GCHeap::WaitForGCEvent = NULL;
-//GCTODO
-#if defined(TRACE_GC) && !defined(MULTIPLE_HEAPS)
-uint64_t GCHeap::GcDuration;
-#endif //TRACE_GC
-unsigned GCHeap::GcCondemnedGeneration = 0;
-size_t GCHeap::totalSurvivedSize = 0;
+GCEvent *GCHeap::WaitForGCEvent = NULL;
+unsigned GCHeap::GcCondemnedGeneration = 0;
+size_t GCHeap::totalSurvivedSize = 0;
#ifdef FEATURE_PREMORTEM_FINALIZATION
-CFinalize* GCHeap::m_Finalize = 0;
-BOOL GCHeap::GcCollectClasses = FALSE;
-VOLATILE(int32_t) GCHeap::m_GCFLock = 0;
+CFinalize* GCHeap::m_Finalize = 0;
+BOOL GCHeap::GcCollectClasses = FALSE;
+VOLATILE(int32_t) GCHeap::m_GCFLock = 0;
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
-#ifdef BACKGROUND_GC
-int GCHeap::gc_stress_fgcs_in_bgc = 0;
-#endif // BACKGROUND_GC
#ifndef MULTIPLE_HEAPS
-OBJECTHANDLE GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
-int GCHeap::m_CurStressObj = 0;
+OBJECTHANDLE GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
+int GCHeap::m_CurStressObj = 0;
#endif // !MULTIPLE_HEAPS
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK
void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
{
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
+#ifdef VERIFY_HEAP
+ if (gc_heap::background_running_p() &&
+ (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
{
uint8_t* range_beg = 0;
uint8_t* range_end = 0;
}
}
}
-#endif //VERIFY_HEAP && MARK_ARRAY
+#endif //VERIFY_HEAP
}
void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
{
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
+#ifdef VERIFY_HEAP
size_t start_mark_bit = mark_bit_of (obj) + 1;
size_t end_mark_bit = mark_bit_of (obj + s);
unsigned int startbit = mark_bit_bit (start_mark_bit);
FATAL_GC_ERROR();
}
}
-#endif //VERIFY_HEAP && MARK_ARRAY
+#endif //VERIFY_HEAP
}
void gc_heap::clear_all_mark_array()
{
-#ifdef MARK_ARRAY
- //size_t num_dwords_written = 0;
- //size_t begin_time = GetHighPrecisionTimeStamp();
-
for (int i = max_generation; i < total_generation_count; i++)
{
generation* gen = generation_of (i);
seg = heap_segment_next_rw (seg);
}
}
-
- //size_t end_time = GetHighPrecisionTimeStamp() - begin_time;
-
- //printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
-
-#endif //MARK_ARRAY
}
-#endif //BACKGROUND_GC
-
-void gc_heap::verify_mark_array_cleared (heap_segment* seg)
+void gc_heap::verify_mark_array_cleared()
{
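+ // with heap verification on, make sure no mark array bits are left set while a BGC is running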
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- assert (card_table == g_gc_card_table);
- size_t markw = mark_word_of (heap_segment_mem (seg));
- size_t markw_end = mark_word_of (heap_segment_reserved (seg));
-
- while (markw < markw_end)
- {
- if (mark_array [markw])
- {
- dprintf (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
- markw, mark_array [markw], mark_word_address (markw)));
- FATAL_GC_ERROR();
- }
- markw++;
- }
-#endif //VERIFY_HEAP && MARK_ARRAY
-}
-
-void gc_heap::verify_mark_array_cleared ()
-{
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
+#ifdef VERIFY_HEAP
+ if (gc_heap::background_running_p() &&
+ (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
{
for (int i = max_generation; i < total_generation_count; i++)
{
}
}
}
-#endif //VERIFY_HEAP && MARK_ARRAY
-}
-
-void gc_heap::verify_seg_end_mark_array_cleared()
-{
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
- {
- for (int i = max_generation; i < total_generation_count; i++)
- {
- generation* gen = generation_of (i);
- heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
-
- while (seg)
- {
- // We already cleared all mark array bits for ephemeral generations
- // at the beginning of bgc sweep
- uint8_t* from = ((seg == ephemeral_heap_segment) ?
- generation_allocation_start (generation_of (max_generation - 1)) :
- heap_segment_allocated (seg));
- size_t markw = mark_word_of (align_on_mark_word (from));
- size_t markw_end = mark_word_of (heap_segment_reserved (seg));
-
- while (from < mark_word_address (markw))
- {
- if (is_mark_bit_set (from))
- {
- dprintf (3, ("mark bit for %Ix was not cleared", from));
- FATAL_GC_ERROR();
- }
-
- from += mark_bit_pitch;
- }
-
- while (markw < markw_end)
- {
- if (mark_array [markw])
- {
- dprintf (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
- markw, mark_array [markw], mark_word_address (markw)));
- FATAL_GC_ERROR();
- }
- markw++;
- }
- seg = heap_segment_next_rw (seg);
- }
- }
- }
-#endif //VERIFY_HEAP && MARK_ARRAY
+#endif //VERIFY_HEAP
}
+#endif //BACKGROUND_GC
// This function is called to make sure we don't mess up the segment list
// in SOH. It's called by:
// sweep.
// NOTE - to be able to call this function during background sweep, we need to temporarily
// NOT clear the mark array bits as we go.
-void gc_heap::verify_partial ()
-{
#ifdef BACKGROUND_GC
-
+void gc_heap::verify_partial()
+{
// Different ways to fail.
BOOL mark_missed_p = FALSE;
BOOL bad_ref_p = FALSE;
seg = heap_segment_next_rw (seg);
}
}
-#endif //BACKGROUND_GC
}
+#endif //BACKGROUND_GC
#ifdef VERIFY_HEAP
-
void
gc_heap::verify_free_lists ()
{
#endif //BACKGROUND_GC
#endif //MULTIPLE_HEAPS
+#ifndef TRACE_GC
UNREFERENCED_PARAMETER(begin_gc_p);
+#endif //!TRACE_GC
+
#ifdef BACKGROUND_GC
dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin",
(begin_gc_p ? "BEG" : "END"),
VolatileLoad(&settings.gc_index),
- (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
+ (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC"))));
#else
dprintf (2,("[%s]GC#%d: Verifying heap - begin",
(begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index)));
if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
deep_verify_obj = FALSE;
- ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);
+ ((CObjectHeader*)curr_object)->ValidateHeap(deep_verify_obj);
if (can_verify_deep)
{
#ifdef BACKGROUND_GC
dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id",
- (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p () ? "FGC" : "NGC")),
+ (settings.concurrent ? "BGC" : (gc_heap::background_running_p () ? "FGC" : "NGC")),
(begin_gc_p ? "BEG" : "END"),
((current_c_gc_state == c_gc_state_planning) ? "in plan" : "not in plan"),
total_objects_verified, total_objects_verified_deep));
}
#ifdef BACKGROUND_GC
- if (!settings.concurrent)
- {
- if (current_c_gc_state == c_gc_state_planning)
- {
- // temporarily commenting this out 'cause an FGC
- // could be triggered before we sweep ephemeral.
- //verify_seg_end_mark_array_cleared();
- }
- }
-
if (settings.concurrent)
{
verify_mark_array_cleared();
}
dprintf (2,("GC%d(%s): Verifying heap - end",
VolatileLoad(&settings.gc_index),
- (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
+ (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC"))));
#else
dprintf (2,("GC#d: Verifying heap - end", VolatileLoad(&settings.gc_index)));
#endif //BACKGROUND_GC
#endif // VERIFY_HEAP
}
-void DestructObject (CObjectHeader* hdr)
-{
- UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
- hdr->~CObjectHeader();
-}
-
-HRESULT GCHeap::StaticShutdown ()
+HRESULT GCHeap::StaticShutdown()
{
deleteGCShadow();
g_num_processors = GCToOSInterface::GetTotalProcessorCount();
assert(g_num_processors != 0);
-//Initialize the static members.
-#ifdef TRACE_GC
- GcDuration = 0;
- CreatedObjectCount = 0;
-#endif //TRACE_GC
-
bool is_restricted;
gc_heap::total_physical_mem = (size_t)GCConfig::GetGCTotalPhysicalMemory();
if (!(gc_heap::total_physical_mem))
if (is_restricted)
{
uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100;
- //printf ("returned physical mem %I64d, setting it to max (75%%: %I64d, 20mb)\n",
- // gc_heap::total_physical_mem, physical_mem_for_gc);
gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc);
}
}
-
- //printf ("heap_hard_limit is %Id, total physical mem: %Id, %s restricted\n",
- // gc_heap::heap_hard_limit, gc_heap::total_physical_mem, (is_restricted ? "is" : "is not"));
#endif //HOST_64BIT
uint32_t nhp = 1;
gc_heap::min_uoh_segment_size = large_seg_size;
-#ifdef SEG_MAPPING_TABLE
if (gc_heap::min_segment_size == 0)
{
gc_heap::min_segment_size = min (seg_size, large_seg_size);
}
gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size);
-#endif //SEG_MAPPING_TABLE
#ifdef MULTIPLE_HEAPS
gc_heap::n_heaps = nhp;
dprintf (3, ("Promote %Ix", (size_t)o));
-#ifdef INTERIOR_POINTERS
if (flags & GC_CALL_INTERIOR)
{
if ((o < hp->gc_low) || (o >= hp->gc_high))
}
}
-#endif //INTERIOR_POINTERS
#ifdef FEATURE_CONSERVATIVE_GC
// For conservative GC, a value on stack may point to middle of a free object.
#endif
#ifdef _DEBUG
- ((CObjectHeader*)o)->ValidatePromote(sc, flags);
+ ((CObjectHeader*)o)->Validate();
#else
UNREFERENCED_PARAMETER(sc);
#endif //_DEBUG
void StressHeapDummy ();
-static int32_t GCStressStartCount = -1;
-static int32_t GCStressCurCount = 0;
-static int32_t GCStressStartAtJit = -1;
-
-// the maximum number of foreground GCs we'll induce during one BGC
-// (this number does not include "naturally" occurring GCs).
-static int32_t GCStressMaxFGCsPerBGC = -1;
-
// CLRRandom implementation can produce FPU exceptions if
// the test/application run by CLR is enabling any FPU exceptions.
// We want to avoid any unexpected exception coming from stress
}
#endif //BACKGROUND_GC
- if (GCStressStartAtJit == -1 || GCStressStartCount == -1)
- {
- GCStressStartCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStressStart);
- GCStressStartAtJit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStartAtJit);
- }
-
- if (GCStressMaxFGCsPerBGC == -1)
- {
- GCStressMaxFGCsPerBGC = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMaxFGCsPerBGC);
- if (g_pConfig->IsGCStressMix() && GCStressMaxFGCsPerBGC == -1)
- GCStressMaxFGCsPerBGC = 6;
- }
-
-#ifdef _DEBUG
- if (g_JitCount < GCStressStartAtJit)
- return FALSE;
-#endif //_DEBUG
-
- // Allow programmer to skip the first N Stress GCs so that you can
- // get to the interesting ones faster.
- Interlocked::Increment(&GCStressCurCount);
- if (GCStressCurCount < GCStressStartCount)
- return FALSE;
-
- // throttle the number of stress-induced GCs by a factor given by GCStressStep
- if ((GCStressCurCount % g_pConfig->GetGCStressStep()) != 0)
- {
- return FALSE;
- }
-
-#ifdef BACKGROUND_GC
- if (IsConcurrentGCEnabled() && IsConcurrentGCInProgress())
- {
- // allow a maximum number of stress induced FGCs during one BGC
- if (gc_stress_fgcs_in_bgc >= GCStressMaxFGCsPerBGC)
- return FALSE;
- ++gc_stress_fgcs_in_bgc;
- }
-#endif // BACKGROUND_GC
-
if (g_pStringClass == 0)
{
// If the String class has not been loaded, dont do any stressing. This should
#else
UNREFERENCED_PARAMETER(context);
return FALSE;
-#endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
+#endif //STRESS_HEAP && !FEATURE_REDHAWK
}
-
#ifdef FEATURE_PREMORTEM_FINALIZATION
#define REGISTER_FOR_FINALIZATION(_object, _size) \
hp->finalize_queue->RegisterForFinalization (0, (_object), (_size))
}
else
{
-#ifdef TRACE_GC
- AllocSmallCount++;
-#endif //TRACE_GC
-
#ifdef FEATURE_64BIT_ALIGNMENT
if (flags & GC_ALLOC_ALIGN8)
{
}
CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
-
-#ifdef TRACE_GC
- AllocCount++;
-#endif //TRACE_GC
return newAlloc;
}
dynamic_data* dd = hpt->dynamic_data_of (generation);
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
if ((mode == collection_optimized) || (mode & collection_non_blocking))
{
(gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry))
{
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
pGenGCHeap->background_gc_wait();
}
dd_collection_count (hp->dynamic_data_of (0)),
settings.condemned_generation,
total_allocated_since_last_gc,
- (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
+ (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")),
settings.b_state));
#else
dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)",
}
#endif //TRACE_GC
- // TODO: this can happen...it's because of the way we are calling
- // do_pre_gc, will fix later.
- //if (last_gc_index > VolatileLoad(&settings.gc_index))
- //{
- // FATAL_GC_ERROR();
- //}
-
- last_gc_index = VolatileLoad(&settings.gc_index);
GCHeap::UpdatePreGCCounters();
#if defined(__linux__)
GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)),
{
#ifdef BACKGROUND_GC
full_gc_counts[gc_type_background]++;
-#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
- GCHeap::gc_stress_fgcs_in_bgc = 0;
-#endif // STRESS_HEAP && !FEATURE_REDHAWK
#endif // BACKGROUND_GC
}
else
current_gen_stats->last_gen_increase_flr = gen_flr;
dprintf (BGC_TUNING_LOG, ("BTLp[g1: %Id, g2: %Id]: gen%d size inc %s %Id->%Id, flr: %.3f",
gen1_index, gen2_index, gen_number,
- (recursive_gc_sync::background_running_p() ? "during bgc" : ""),
+ (gc_heap::background_running_p() ? "during bgc" : ""),
current_gen_stats->last_bgc_physical_size, total_gen_size, gen_flr));
}
{
if (bgc_tuning::enable_fl_tuning)
{
- if (!((recursive_gc_sync::background_running_p() || (hp->current_bgc_state == bgc_initialized))))
+ if (!((gc_heap::background_running_p() || (hp->current_bgc_state == bgc_initialized))))
{
assert (settings.entry_memory_load);
initGCShadow();
}
-#ifdef TRACE_GC
- AllocStart = GCToOSInterface::QueryPerformanceCounter();
-#endif //TRACE_GC
-
#ifdef MULTIPLE_HEAPS
gc_heap* hp = g_heaps[0];
#else
dd_collection_count(hp->dynamic_data_of(0)),
GetHighPrecisionTimeStamp(),
settings.condemned_generation,
- (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
+ (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")),
(settings.compaction ? "C" : "S"),
(settings.promotion ? "P" : "S"),
settings.entry_memory_load,
}
}
-#ifdef TRACE_GC
- AllocDuration += GCToOSInterface::QueryPerformanceCounter() - AllocStart;
-#endif //TRACE_GC
-
- gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
- (reason == reason_lowmemory_blocking) ||
- (gc_heap::latency_level == latency_level_memory_footprint);
+ gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
+ (reason == reason_lowmemory_blocking) ||
+ (gc_heap::latency_level == latency_level_memory_footprint);
- gc_trigger_reason = reason;
+ gc_trigger_reason = reason;
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < gc_heap::n_heaps; i++)
#endif //!MULTIPLE_HEAPS
}
-// MAP_EVENT_MONITORS(EE_MONITOR_GARBAGE_COLLECTIONS, NotifyEvent(EE_EVENT_TYPE_GC_STARTED, 0));
-
-#ifdef TRACE_GC
- int64_t start;
- int64_t finish;
- start = GCToOSInterface::QueryPerformanceCounter();
- PromotedObjectCount = 0;
-#endif //TRACE_GC
-
unsigned int condemned_generation_number = gen;
// We want to get a stack from the user thread that triggered the GC
}
#endif //MULTIPLE_HEAPS
-#ifdef TRACE_GC
- finish = GCToOSInterface::QueryPerformanceCounter();
- GcDuration += finish - start;
- dprintf (3,
- ("<GC# %d> Condemned: %d, Duration: %d, total: %d Alloc Avg: %d, Small Objects:%d Large Objects:%d",
- VolatileLoad(&pGenGCHeap->settings.gc_index), condemned_generation_number,
- finish - start, GcDuration,
- AllocCount ? (AllocDuration / AllocCount) : 0,
- AllocSmallCount, AllocBigCount));
- AllocCount = 0;
- AllocDuration = 0;
-#endif // TRACE_GC
-
#ifdef BACKGROUND_GC
// We are deciding whether we should fire the alloc wait end event here
// because in begin_foreground we could be calling end_foreground
size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
{
size_t totsize = 0;
- //GCTODO
- //ASSERT(InMustComplete());
enter_spin_lock (&pGenGCHeap->gc_lock);
heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
}
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
// If we get here, it means we are doing an FGC. If the pause
// mode was altered we will need to save it in the BGC settings.
// triggering a GC of any kind.
size_t GCHeap::ApproxFreeBytes()
{
- //GCTODO
- //ASSERT(InMustComplete());
enter_spin_lock (&pGenGCHeap->gc_lock);
generation* gen = pGenGCHeap->generation_of (0);
return (large_seg ? gc_heap::min_uoh_segment_size : gc_heap::soh_segment_size);
}
-// Get the max gen0 heap size, making sure it conforms.
size_t gc_heap::get_gen0_min_size()
{
size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
gen = 0;
if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
{
- //just reset the bit
((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN);
return true;
}
if (g_fFinalizerRunOnShutDown)
{
- //no method table available yet,
//put it in the finalizer queue and sort out when
//dequeueing
dest = FinalizerListSeg;
CFinalize::GetNextFinalizableObject (BOOL only_non_critical)
{
Object* obj = 0;
- //serialize
EnterFinalizeLock();
retry:
Object** stopIndex = SegQueueLimit (FinalizerListSeg);
for (Object** po = startIndex; po < stopIndex; po++)
{
- //report *po
fn(po < stopCriticalIndex, *po);
}
}
{
//remove the object because we don't want to
//run the finalizer
-
MoveItem (i, Seg, FreeList);
//Reset the bit so it will be put back on the queue
#ifdef BACKGROUND_GC
else
{
- if ((gen == max_generation) && (recursive_gc_sync::background_running_p()))
+ if ((gen == max_generation) && (gc_heap::background_running_p()))
{
// TODO - fix the following line.
//assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE));
}
else
{
- //Look for demoted or promoted plugs
-
+ //Look for demoted or promoted objects
for (int i = gen; i >= 0; i--)
{
unsigned int Seg = gen_segment (i);
po--;
}
}
-
}
}
}
Object** newArray = new (nothrow) Object*[newArraySize];
if (!newArray)
{
- // It's not safe to throw here, because of the FinalizeLock. Tell our caller
- // to throw for us.
-// ASSERT (newArray);
return FALSE;
}
memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));
void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
{
- UNREFERENCED_PARAMETER(gen_number);
- GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
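+ // scan only the generation the diagnostic caller asked for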
+ GCScan::GcScanHandlesForProfilerAndETW (gen_number, context, fn);
}
void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
{
- UNREFERENCED_PARAMETER(gen_number);
- GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
-}
-
-// Go through and touch (read) each page straddled by a memory block.
-void TouchPages(void * pStart, size_t cb)
-{
- const size_t pagesize = OS_PAGE_SIZE;
- _ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
- if (cb)
- {
- VOLATILE(char)* pEnd = (VOLATILE(char)*)(cb + (char*)pStart);
- VOLATILE(char)* p = (VOLATILE(char)*)(((char*)pStart) - (((size_t)pStart) & (pagesize-1)));
- while (p < pEnd)
- {
- char a;
- a = VolatileLoad(p);
- //printf("Touching page %lxh\n", (uint32_t)p);
- p += pagesize;
- }
- }
+ GCScan::GcScanDependentHandlesForProfilerAndETW (gen_number, context, fn);
}
#if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
#else
uint8_t *o = heap_segment_mem(seg);
- // small heap alignment constant
int alignment = get_alignment_constant(TRUE);
while (o < heap_segment_allocated(seg))
HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
{
#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
+ if (gc_heap::background_running_p())
{
uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout);
if (dwRet == WAIT_OBJECT_0)