Code cleanup (#34862)
author Maoni Stephens <Maoni0@users.noreply.github.com>
Mon, 13 Apr 2020 21:05:41 +0000 (14:05 -0700)
committer GitHub <noreply@github.com>
Mon, 13 Apr 2020 21:05:41 +0000 (14:05 -0700)
made these #define's always defined:

INTERIOR_POINTERS
MARK_ARRAY (replaced it with BACKGROUND_GC)
SEG_MAPPING_TABLE
GROWABLE_SEG_MAPPING_TABLE
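
for example, as the mark_array declaration hunk below shows, guards like

    #ifdef MARK_ARRAY
    #ifndef MULTIPLE_HEAPS
    uint32_t*   gc_heap::mark_array;
    #endif //MULTIPLE_HEAPS
    #endif //MARK_ARRAY

now become

    #if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
    uint32_t*   gc_heap::mark_array;
    #endif //BACKGROUND_GC && !MULTIPLE_HEAPS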

got rid of code associated with GC_STATS, recursive_gc_sync, and the following configs:
GCStressMix, GCStressStep, GCStressMaxFGCsPerBGC, GCMixLog, GCStressStart, and GCStressStartAtJit.

got rid of all the unnecessary UNREFERENCED_PARAMETER usages
got rid of MAYBE_UNUSED_VAR

for joins, got rid of all the extra pairs of #ifdef MULTIPLE_HEAPS checks and made the code styling consistent

got rid of various other pieces of unused code and completely pointless comments.

note there is one functional change: fixed a memory leak by actually doing seg_table->delete_old_slots
when FEATURE_BASICFREEZE is defined; previously this could leak memory if there were more than 600
segments (which is very rare for ro segs), and even then only a small amount of memory was leaked.
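
to illustrate the pattern being fixed, here is a minimal sketch (not the actual sorted_table code;
the real table stores the address of the last array it allocated in its 0th element, as the comment
in the diff below notes). when the table grows, the superseded buffer is chained off slot 0 of the
new one, and delete_old_slots walks that chain and frees it; skipping the call leaks one old buffer
per grow:

    // minimal sketch of the grow/delete_old_slots pattern; all names here are illustrative
    #include <cstdlib>
    #include <cstring>

    struct growable_table
    {
        void** slots;     // slots[0] chains to the previously allocated array
        size_t count;     // entries in use, not counting slot 0
        size_t capacity;

        bool grow ()
        {
            size_t new_capacity = capacity * 2;
            void** new_slots = (void**)malloc (new_capacity * sizeof (void*));
            if (!new_slots)
                return false;
            memcpy (&new_slots[1], &slots[1], count * sizeof (void*));
            new_slots[0] = slots;   // keep the old array chained instead of freeing it here
            slots = new_slots;
            capacity = new_capacity;
            return true;
        }

        // frees the chain of superseded arrays; if this is never called,
        // each grow leaks one old buffer, which is what the fix addresses
        void delete_old_slots ()
        {
            void** old_slots = (void**)slots[0];
            while (old_slots)
            {
                void** next = (void**)old_slots[0];
                free (old_slots);
                old_slots = next;
            }
            slots[0] = nullptr;
        }
    };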

Co-authored-by: Maoni Stephens <maonis@ntdev.microsoft.com>
src/coreclr/src/gc/gc.cpp
src/coreclr/src/gc/gcconfig.h
src/coreclr/src/gc/gcpriv.h
src/coreclr/src/gc/handletablescan.cpp
src/coreclr/src/inc/clrconfigvalues.h
src/coreclr/src/vm/eeconfig.cpp
src/coreclr/src/vm/eeconfig.h
src/coreclr/src/vm/object.cpp
src/coreclr/src/vm/object.h

index 50460c3..3e3096e 100644
@@ -47,12 +47,6 @@ uint64_t gc_rand::x = 0;
 BOOL bgc_heap_walk_for_etw_p = FALSE;
 #endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
 
-#if defined(FEATURE_REDHAWK)
-#define MAYBE_UNUSED_VAR(v) v = v
-#else
-#define MAYBE_UNUSED_VAR(v)
-#endif // FEATURE_REDHAWK
-
 #define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
 #define commit_min_th (16*OS_PAGE_SIZE)
 
@@ -101,7 +95,7 @@ uint8_t g_build_variant = 0;
 uint8_t g_build_variant = 1;
 #else
 uint8_t g_build_variant = 2;
-#endif // defined(BUILDENV_DEBUG)
+#endif //BUILDENV_DEBUG
 
 VOLATILE(int32_t) g_no_gc_lock = -1;
 
@@ -175,7 +169,7 @@ static const char* const str_gc_pause_modes[] =
     "sustained_low_latency",
     "no_gc"
 };
-#endif // defined(DT_LOG) || defined(TRACE_GC)
+#endif //DT_LOG || TRACE_GC
 
 inline
 BOOL is_induced (gc_reason reason)
@@ -216,144 +210,6 @@ uint64_t RawGetHighPrecisionTimeStamp()
 
 #endif
 
-#ifdef GC_STATS
-// There is a current and a prior copy of the statistics.  This allows us to display deltas per reporting
-// interval, as well as running totals.  The 'min' and 'max' values require special treatment.  They are
-// Reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
-// comparison with the global min/max.
-GCStatistics g_GCStatistics;
-GCStatistics g_LastGCStatistics;
-
-char* GCStatistics::logFileName = NULL;
-FILE*  GCStatistics::logFile = NULL;
-
-void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
-{
-#ifdef BACKGROUND_GC
-    if (settings.concurrent)
-    {
-        bgc.Accumulate((uint32_t)timeInMSec*1000);
-        cntBGC++;
-    }
-    else if (settings.background_p)
-    {
-        fgc.Accumulate((uint32_t)timeInMSec*1000);
-        cntFGC++;
-        if (settings.compaction)
-            cntCompactFGC++;
-        assert(settings.condemned_generation < max_generation);
-        cntFGCGen[settings.condemned_generation]++;
-    }
-    else
-#endif // BACKGROUND_GC
-    {
-        ngc.Accumulate((uint32_t)timeInMSec*1000);
-        cntNGC++;
-        if (settings.compaction)
-            cntCompactNGC++;
-        cntNGCGen[settings.condemned_generation]++;
-    }
-
-    if (is_induced (settings.reason))
-        cntReasons[(int)reason_induced]++;
-    else if (settings.stress_induced)
-        cntReasons[(int)reason_gcstress]++;
-    else
-        cntReasons[(int)settings.reason]++;
-
-#ifdef BACKGROUND_GC
-    if (settings.concurrent || !settings.background_p)
-    {
-#endif // BACKGROUND_GC
-        RollOverIfNeeded();
-#ifdef BACKGROUND_GC
-    }
-#endif // BACKGROUND_GC
-}
-
-void GCStatistics::Initialize()
-{
-    LIMITED_METHOD_CONTRACT;
-    // for efficiency sake we're taking a dependency on the layout of a C++ object
-    // with a vtable. protect against violations of our premise:
-    static_assert(offsetof(GCStatistics, cntDisplay) == sizeof(void*),
-            "The first field of GCStatistics follows the pointer sized vtable");
-
-    int podOffs = offsetof(GCStatistics, cntDisplay);       // offset of the first POD field
-    memset((uint8_t*)(&g_GCStatistics)+podOffs, 0, sizeof(g_GCStatistics)-podOffs);
-    memset((uint8_t*)(&g_LastGCStatistics)+podOffs, 0, sizeof(g_LastGCStatistics)-podOffs);
-}
-
-void GCStatistics::DisplayAndUpdate()
-{
-    LIMITED_METHOD_CONTRACT;
-
-    if (logFileName == NULL || logFile == NULL)
-        return;
-
-    {
-        if (cntDisplay == 0)
-            fprintf(logFile, "\nGCMix **** Initialize *****\n\n");
-
-        fprintf(logFile, "GCMix **** Summary ***** %d\n", cntDisplay);
-
-        // NGC summary (total, timing info)
-        ngc.DisplayAndUpdate(logFile, "NGC ", &g_LastGCStatistics.ngc, cntNGC, g_LastGCStatistics.cntNGC, msec);
-
-        // FGC summary (total, timing info)
-        fgc.DisplayAndUpdate(logFile, "FGC ", &g_LastGCStatistics.fgc, cntFGC, g_LastGCStatistics.cntFGC, msec);
-
-        // BGC summary
-        bgc.DisplayAndUpdate(logFile, "BGC ", &g_LastGCStatistics.bgc, cntBGC, g_LastGCStatistics.cntBGC, msec);
-
-        // NGC/FGC break out by generation & compacting vs. sweeping
-        fprintf(logFile, "NGC   ");
-        for (int i = max_generation; i >= 0; --i)
-            fprintf(logFile, "gen%d %d (%d). ", i, cntNGCGen[i]-g_LastGCStatistics.cntNGCGen[i], cntNGCGen[i]);
-        fprintf(logFile, "\n");
-
-        fprintf(logFile, "FGC   ");
-        for (int i = max_generation-1; i >= 0; --i)
-            fprintf(logFile, "gen%d %d (%d). ", i, cntFGCGen[i]-g_LastGCStatistics.cntFGCGen[i], cntFGCGen[i]);
-        fprintf(logFile, "\n");
-
-        // Compacting vs. Sweeping break out
-        int _cntSweep = cntNGC-cntCompactNGC;
-        int _cntLastSweep = g_LastGCStatistics.cntNGC-g_LastGCStatistics.cntCompactNGC;
-        fprintf(logFile, "NGC   Sweeping %d (%d) Compacting %d (%d)\n",
-               _cntSweep - _cntLastSweep, _cntSweep,
-               cntCompactNGC - g_LastGCStatistics.cntCompactNGC, cntCompactNGC);
-
-        _cntSweep = cntFGC-cntCompactFGC;
-        _cntLastSweep = g_LastGCStatistics.cntFGC-g_LastGCStatistics.cntCompactFGC;
-        fprintf(logFile, "FGC   Sweeping %d (%d) Compacting %d (%d)\n",
-               _cntSweep - _cntLastSweep, _cntSweep,
-               cntCompactFGC - g_LastGCStatistics.cntCompactFGC, cntCompactFGC);
-
-#ifdef TRACE_GC
-        // GC reasons...
-        for (int reason=(int)reason_alloc_soh; reason <= (int)reason_gcstress; ++reason)
-        {
-            if (cntReasons[reason] != 0)
-                fprintf(logFile, "%s %d (%d). ", str_gc_reasons[reason],
-                    cntReasons[reason]-g_LastGCStatistics.cntReasons[reason], cntReasons[reason]);
-        }
-#endif // TRACE_GC
-        fprintf(logFile, "\n\n");
-
-        // flush the log file...
-        fflush(logFile);
-    }
-
-    g_LastGCStatistics = *this;
-
-    ngc.Reset();
-    fgc.Reset();
-    bgc.Reset();
-}
-
-#endif // GC_STATS
-
 #ifdef BGC_SERVO_TUNING
 bool gc_heap::bgc_tuning::enable_fl_tuning = false;
 uint32_t gc_heap::bgc_tuning::memory_load_goal = 0;
@@ -488,7 +344,6 @@ inline
 void c_write (uint32_t& place, uint32_t value)
 {
     Interlocked::Exchange (&place, value);
-    //place = value;
 }
 
 #ifndef DACCESS_COMPILE
@@ -686,7 +541,6 @@ void GCHeap::Shutdown()
 }
 
 #ifdef SYNCHRONIZATION_STATS
-
 // Number of GCs have we done since we last logged.
 static unsigned int         gc_count_during_log;
  // In ms. This is how often we print out stats.
@@ -700,7 +554,6 @@ static uint64_t             suspend_ee_during_log;
 // Cycles accumulated in RestartEE during log_interval.
 static uint64_t             restart_ee_during_log;
 static uint64_t             gc_during_log;
-
 #endif //SYNCHRONIZATION_STATS
 
 void
@@ -848,13 +701,6 @@ enum join_heap_index
     join_heap_r_restart = 200
 };
 
-struct join_event
-{
-    uint32_t heap;
-    join_time time;
-    join_type type;
-};
-
 class t_join
 {
     join_structure join_struct;
@@ -959,10 +805,7 @@ respin:
                     dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d",
                         flavor, join_id, color, (int32_t)(join_struct.join_lock)));
 
-                    //Thread* current_thread = GCToEEInterface::GetThread();
-                    //BOOL cooperative_mode = gc_heap::enable_preemptive ();
                     uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE);
-                    //gc_heap::disable_preemptive (cooperative_mode);
 
                     if (dwJoinWait != WAIT_OBJECT_0)
                     {
@@ -997,7 +840,6 @@ respin:
             dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id));
             join_struct.joined_event[!color].Reset();
             id = join_id;
-            // this one is alone so it can proceed
 #ifdef JOIN_STATS
             // remember the join id, the last thread arriving, the start of the sequential phase,
             // and keep track of the cycles spent waiting in the join
@@ -1027,7 +869,7 @@ respin:
             dprintf (JOIN_LOG, ("r_join() Waiting..."));
 
             //busy wait around the color
-        respin:
+respin:
             int spin_count = 256 * yp_spin_count_unit;
             for (int j = 0; j < spin_count; j++)
             {
@@ -1111,7 +953,7 @@ respin:
         const double ts_scale = 1e-6;
 
         // enable this printf to get statistics on each individual join as it occurs
-//      printf("join #%3d  seq_loss = %5g   par_loss = %5g  efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);
+        //printf("join #%3d  seq_loss = %5g   par_loss = %5g  efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);
 
         elapsed_total[id] += sum;
         wake_total[id] += wake;
@@ -1142,13 +984,10 @@ respin:
         join_struct.joined_p = FALSE;
         join_struct.join_lock = join_struct.n_threads;
         dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
-//        printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
         int color = join_struct.lock_color.LoadWithoutBarrier();
         join_struct.lock_color = !color;
         join_struct.joined_event[color].Set();
 
-//        printf("Set joined_event %d\n", !join_struct.lock_color);
-
         fire_event (join_heap_restart, time_end, type_restart, -1);
 
 #ifdef JOIN_STATS
@@ -1210,8 +1049,7 @@ t_join bgc_t_join;
     } \
 }
 
-#ifndef DACCESS_COMPILE
-#ifdef BACKGROUND_GC
+#if defined(BACKGROUND_GC) && !defined(DACCESS_COMPILE)
 
 #define max_pending_allocs 64
 
@@ -1380,223 +1218,7 @@ retry:
     }
 };
 
-// Note that this class was written assuming just synchronization between
-// one background GC thread and multiple user threads that might request
-// an FGC - it does not take into account what kind of locks the multiple
-// user threads might be holding at the time (eg, there could only be one
-// user thread requesting an FGC because it needs to take gc_lock first)
-// so you'll see checks that may not be necessary if you take those conditions
-// into consideration.
-//
-// With the introduction of Server Background GC we no longer use this
-// class to do synchronization between FGCs and BGC.
-class recursive_gc_sync
-{
-    static VOLATILE(int32_t) foreground_request_count;//initial state 0
-    static VOLATILE(BOOL) gc_background_running; //initial state FALSE
-    static VOLATILE(int32_t) foreground_count; // initial state 0;
-    static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
-    static GCEvent foreground_complete;//Auto Reset
-    static GCEvent foreground_allowed;//Auto Reset
-public:
-    static void begin_background();
-    static void end_background();
-    static void begin_foreground();
-    static void end_foreground();
-    BOOL allow_foreground ();
-    static BOOL init();
-    static void shutdown();
-    static BOOL background_running_p() {return gc_background_running;}
-};
-
-VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
-VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
-VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
-VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
-GCEvent recursive_gc_sync::foreground_complete;//Auto Reset
-GCEvent recursive_gc_sync::foreground_allowed;//Manual Reset
-
-BOOL recursive_gc_sync::init ()
-{
-    foreground_request_count = 0;
-    foreground_count = 0;
-    gc_background_running = FALSE;
-    foreground_gate = 0;
-
-    if (!foreground_complete.CreateOSAutoEventNoThrow(FALSE))
-    {
-        goto error;
-    }
-    if (!foreground_allowed.CreateManualEventNoThrow(FALSE))
-    {
-        goto error;
-    }
-    return TRUE;
-
-error:
-    shutdown();
-    return FALSE;
-
-}
-
-void recursive_gc_sync::shutdown()
-{
-    if (foreground_complete.IsValid())
-        foreground_complete.CloseEvent();
-    if (foreground_allowed.IsValid())
-        foreground_allowed.CloseEvent();
-}
-
-void recursive_gc_sync::begin_background()
-{
-    dprintf (2, ("begin background"));
-    foreground_request_count = 1;
-    foreground_count = 1;
-    foreground_allowed.Reset();
-    gc_background_running = TRUE;
-}
-void recursive_gc_sync::end_background()
-{
-    dprintf (2, ("end background"));
-    gc_background_running = FALSE;
-    foreground_gate = 1;
-    foreground_allowed.Set();
-}
-
-void recursive_gc_sync::begin_foreground()
-{
-    dprintf (2, ("begin_foreground"));
-
-    bool cooperative_mode = false;
-    if (gc_background_running)
-    {
-        gc_heap::fire_alloc_wait_event_begin (awr_fgc_wait_for_bgc);
-        gc_heap::alloc_wait_event_p = TRUE;
-
-try_again_top:
-
-        Interlocked::Increment (&foreground_request_count);
-
-try_again_no_inc:
-        dprintf(2, ("Waiting sync gc point"));
-        assert (foreground_allowed.IsValid());
-        assert (foreground_complete.IsValid());
-
-        cooperative_mode = gc_heap::enable_preemptive ();
-
-        foreground_allowed.Wait(INFINITE, FALSE);
-
-        dprintf(2, ("Waiting sync gc point is done"));
-
-        gc_heap::disable_preemptive (cooperative_mode);
-
-        if (foreground_gate)
-        {
-            Interlocked::Increment (&foreground_count);
-            dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
-            if (foreground_gate)
-            {
-                gc_heap::settings.concurrent = FALSE;
-                return;
-            }
-            else
-            {
-                end_foreground();
-                goto try_again_top;
-            }
-        }
-        else
-        {
-            goto try_again_no_inc;
-        }
-    }
-}
-
-void recursive_gc_sync::end_foreground()
-{
-    dprintf (2, ("end_foreground"));
-    if (gc_background_running)
-    {
-        Interlocked::Decrement (&foreground_request_count);
-        dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
-        if (Interlocked::Decrement (&foreground_count) == 0)
-        {
-            //c_write ((BOOL*)&foreground_gate, 0);
-            // TODO - couldn't make the syntax work with Volatile<T>
-            foreground_gate = 0;
-            if (foreground_count == 0)
-            {
-                foreground_allowed.Reset ();
-                dprintf(2, ("setting foreground complete event"));
-                foreground_complete.Set();
-            }
-        }
-    }
-}
-
-inline
-BOOL recursive_gc_sync::allow_foreground()
-{
-    assert (gc_heap::settings.concurrent);
-    dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
-                   (int32_t)foreground_request_count, (int32_t)foreground_count));
-
-    BOOL did_fgc = FALSE;
-
-    //if we have suspended the EE, just return because
-    //some thread could be waiting on this to proceed.
-    if (!GCHeap::GcInProgress)
-    {
-        //TODO BACKGROUND_GC This is to stress the concurrency between
-        //background and foreground
-//        gc_heap::disallow_new_allocation (0);
-
-        //GCToOSInterface::YieldThread(0);
-
-        //END of TODO
-        if (foreground_request_count != 0)
-        {
-            //foreground wants to run
-            //save the important settings
-            //TODO BACKGROUND_GC be more selective about the important settings.
-            gc_mechanisms saved_settings = gc_heap::settings;
-            do
-            {
-                did_fgc = TRUE;
-                //c_write ((BOOL*)&foreground_gate, 1);
-                // TODO - couldn't make the syntax work with Volatile<T>
-                foreground_gate = 1;
-                foreground_allowed.Set ();
-                foreground_complete.Wait (INFINITE, FALSE);
-            }while (/*foreground_request_count ||*/ foreground_gate);
-
-            assert (!foreground_gate);
-
-            //restore the important settings
-            gc_heap::settings = saved_settings;
-            GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
-            //the background GC shouldn't be using gc_high and gc_low
-            //gc_low = lowest_address;
-            //gc_high = highest_address;
-        }
-
-        //TODO BACKGROUND_GC This is to stress the concurrency between
-        //background and foreground
-//        gc_heap::allow_new_allocation (0);
-        //END of TODO
-    }
-
-    dprintf (100, ("leave allow_foreground"));
-    assert (gc_heap::settings.concurrent);
-    return did_fgc;
-}
-
-#endif //BACKGROUND_GC
-#endif //DACCESS_COMPILE
-
-#ifndef MULTIPLE_HEAPS
-
-#endif // MULTIPLE_HEAPS
+#endif //BACKGROUND_GC && !DACCESS_COMPILE
 
 void reset_memory (uint8_t* o, size_t sizeo);
 
@@ -1610,8 +1232,6 @@ static bool hardware_write_watch_capability = false;
 
 #ifndef DACCESS_COMPILE
 
-//check if the write watch APIs are supported.
-
 void hardware_write_watch_api_supported()
 {
     if (GCToOSInterface::SupportsWriteWatch())
@@ -1624,7 +1244,6 @@ void hardware_write_watch_api_supported()
         dprintf (2,("WriteWatch not supported"));
     }
 }
-
 #endif //!DACCESS_COMPILE
 
 inline bool can_use_hardware_write_watch()
@@ -1949,24 +1568,12 @@ void gc_heap::disable_preemptive (bool restore_cooperative)
 #endif // !DACCESS_COMPILE
 
 typedef void **  PTR_PTR;
-//This function clears a piece of memory
-// size has to be Dword aligned
-
 inline
 void memclr ( uint8_t* mem, size_t size)
 {
     dprintf (3, ("MEMCLR: %Ix, %d", mem, size));
     assert ((size & (sizeof(PTR_PTR)-1)) == 0);
     assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
-
-#if 0
-    // The compiler will recognize this pattern and replace it with memset call. We can as well just call
-    // memset directly to make it obvious what's going on.
-    PTR_PTR m = (PTR_PTR) mem;
-    for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
-        *(m++) = 0;
-#endif
-
     memset (mem, 0, size);
 }
 
@@ -1976,7 +1583,6 @@ void memcopy (uint8_t* dmem, uint8_t* smem, size_t size)
     const size_t sz2ptr = sizeof(PTR_PTR)*2;
     const size_t sz1ptr = sizeof(PTR_PTR)*1;
 
-    // size must be a multiple of the pointer size
     assert ((size & (sizeof (PTR_PTR)-1)) == 0);
     assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);
 
@@ -2008,10 +1614,7 @@ void memcopy (uint8_t* dmem, uint8_t* smem, size_t size)
     if (size & sz1ptr)
     {
         ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
-        // dmem += sz1ptr;
-        // smem += sz1ptr;
     }
-
 }
 
 inline
@@ -2032,10 +1635,6 @@ ptrdiff_t round_down (ptrdiff_t add, int pitch)
 #error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive
 #endif
 
-#if defined(GROWABLE_SEG_MAPPING_TABLE) && !defined(SEG_MAPPING_TABLE)
-#error if GROWABLE_SEG_MAPPING_TABLE is defined, SEG_MAPPING_TABLE must be defined
-#endif
-
 // Returns true if two pointers have the same large (double than normal) alignment.
 inline
 BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
@@ -2251,7 +1850,6 @@ size_t align_on_segment_hard_limit (size_t add)
 
 #endif //SERVER_GC
 
-//amount in bytes of the etw allocation tick
 const size_t etw_allocation_tick = 100*1024;
 
 const size_t low_latency_alloc = 256*1024;
@@ -2262,7 +1860,6 @@ const size_t fgn_check_quantum = 2*1024*1024;
 const int max_snoop_level = 128;
 #endif //MH_SC_MARK
 
-
 #ifdef CARD_BUNDLE
 //threshold of heap size to turn on card bundles.
 #define SH_TH_CARD_BUNDLE  (40*1024*1024)
@@ -2307,7 +1904,6 @@ uint8_t* align_write_watch_lower_page (uint8_t* add)
     return (uint8_t*)align_lower_page ((size_t)add);
 }
 
-
 inline
 BOOL power_of_two_p (size_t integer)
 {
@@ -2467,11 +2063,9 @@ void* virtual_alloc (size_t size, bool use_large_pages_p);
 void virtual_free (void* add, size_t size);
 
 /* per heap static initialization */
-#ifdef MARK_ARRAY
-#ifndef MULTIPLE_HEAPS
+#if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
 uint32_t*   gc_heap::mark_array;
-#endif //MULTIPLE_HEAPS
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC && !MULTIPLE_HEAPS
 
 #ifdef MARK_LIST
 uint8_t**   gc_heap::g_mark_list;
@@ -2483,13 +2077,11 @@ uint8_t**   gc_heap::g_mark_list_copy;
 size_t      gc_heap::mark_list_size;
 #endif //MARK_LIST
 
-#ifdef SEG_MAPPING_TABLE
 seg_mapping* seg_mapping_table;
-#endif //SEG_MAPPING_TABLE
 
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
 sorted_table* gc_heap::seg_table;
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
 
 #ifdef MULTIPLE_HEAPS
 GCEvent     gc_heap::ee_suspend_event;
@@ -2623,6 +2215,7 @@ BOOL        gc_heap::alloc_wait_event_p = FALSE;
 
 VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;
 
+VOLATILE(BOOL) gc_heap::gc_background_running = FALSE;
 #endif //BACKGROUND_GC
 
 #ifndef MULTIPLE_HEAPS
@@ -2731,7 +2324,7 @@ mark*       gc_heap::mark_stack_array = 0;
 
 #if defined (_DEBUG) && defined (VERIFY_HEAP)
 BOOL        gc_heap::verify_pinned_queue_p = FALSE;
-#endif // defined (_DEBUG) && defined (VERIFY_HEAP)
+#endif //_DEBUG && VERIFY_HEAP
 
 uint8_t*    gc_heap::oldest_pinned_plug = 0;
 
@@ -2757,7 +2350,7 @@ EEThreadId  gc_heap::bgc_thread_id;
 
 uint8_t*    gc_heap::background_written_addresses [array_size+2];
 
-heap_segment* gc_heap::freeable_small_heap_segment = 0;
+heap_segment* gc_heap::freeable_soh_segment = 0;
 
 size_t      gc_heap::bgc_overflow_count = 0;
 
@@ -2907,14 +2500,11 @@ uint64_t gc_heap::total_loh_a_last_bgc = 0;
 size_t gc_heap::eph_gen_starts_size = 0;
 heap_segment* gc_heap::segment_standby_list;
 bool          gc_heap::use_large_pages_p = 0;
-size_t        gc_heap::last_gc_index = 0;
 #ifdef HEAP_BALANCE_INSTRUMENTATION
 size_t        gc_heap::last_gc_end_time_ms = 0;
 #endif //HEAP_BALANCE_INSTRUMENTATION
-#ifdef SEG_MAPPING_TABLE
 size_t        gc_heap::min_segment_size = 0;
 size_t        gc_heap::min_segment_size_shr = 0;
-#endif //SEG_MAPPING_TABLE
 size_t        gc_heap::soh_segment_size = 0;
 size_t        gc_heap::min_uoh_segment_size = 0;
 size_t        gc_heap::segment_info_size = 0;
@@ -3427,7 +3017,7 @@ in_range_for_segment(uint8_t* add, heap_segment* seg)
     return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
 }
 
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
 // The array we allocate is organized as follows:
 // 0th element is the address of the last array we allocated.
 // starting from the 1st element are the segment addresses, that's
@@ -3631,7 +3221,6 @@ sorted_table::remove (uint8_t* add)
         {
             if (buck[ti-1].add <= add)
             {
-                // found the guy to remove
                 for (ptrdiff_t k = ti; k < count; k++)
                     buck[k-1] = buck[k];
                 count--;
@@ -3643,7 +3232,6 @@ sorted_table::remove (uint8_t* add)
         {
             if (buck[ti+1].add > add)
             {
-                // found the guy to remove
                 for (ptrdiff_t k = ti+1; k < count; k++)
                     buck[k-1] = buck[k];
                 count--;
@@ -3661,10 +3249,8 @@ sorted_table::clear()
     count = 1;
     buckets()[0].add = MAX_PTR;
 }
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
 
-#ifdef SEG_MAPPING_TABLE
-#ifdef GROWABLE_SEG_MAPPING_TABLE
 inline
 uint8_t* align_on_segment (uint8_t* add)
 {
@@ -3697,33 +3283,6 @@ size_t seg_mapping_word_of (uint8_t* add)
 {
     return (size_t)add >> gc_heap::min_segment_size_shr;
 }
-#else //GROWABLE_SEG_MAPPING_TABLE
-BOOL seg_mapping_table_init()
-{
-#ifdef HOST_64BIT
-    uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024;
-#else
-    uint64_t total_address_space = (uint64_t)4*1024*1024*1024;
-#endif // HOST_64BIT
-
-    size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr);
-    seg_mapping_table = new seg_mapping[num_entries];
-
-    if (seg_mapping_table)
-    {
-        memset (seg_mapping_table, 0, num_entries * sizeof (seg_mapping));
-        dprintf (1, ("created %d entries for heap mapping (%Id bytes)",
-                     num_entries, (num_entries * sizeof (seg_mapping))));
-        return TRUE;
-    }
-    else
-    {
-        dprintf (1, ("failed to create %d entries for heap mapping (%Id bytes)",
-                     num_entries, (num_entries * sizeof (seg_mapping))));
-        return FALSE;
-    }
-}
-#endif //GROWABLE_SEG_MAPPING_TABLE
 
 #ifdef FEATURE_BASICFREEZE
 inline
@@ -3744,10 +3303,8 @@ size_t ro_seg_end_index (heap_segment* seg)
 
 void seg_mapping_table_add_ro_segment (heap_segment* seg)
 {
-#ifdef GROWABLE_SEG_MAPPING_TABLE
     if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
         return;
-#endif //GROWABLE_SEG_MAPPING_TABLE
 
     for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++)
         seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry);
@@ -3906,6 +3463,7 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
         seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
 #endif //FEATURE_BASICFREEZE
 
+#ifdef TRACE_GC
     if (seg)
     {
         if (in_range_for_segment (o, seg))
@@ -3922,6 +3480,7 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
     {
         dprintf (2, ("could not find obj %Ix in any existing segments", o));
     }
+#endif //TRACE_GC
 #endif //_DEBUG
 
     return hp;
@@ -3929,20 +3488,18 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
 
 gc_heap* seg_mapping_table_heap_of (uint8_t* o)
 {
-#ifdef GROWABLE_SEG_MAPPING_TABLE
     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
         return 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
 
     return seg_mapping_table_heap_of_worker (o);
 }
 
 gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
 {
-#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
+#ifdef FEATURE_BASICFREEZE
     if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
         return 0;
-#endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
+#endif //FEATURE_BASICFREEZE
 
     return seg_mapping_table_heap_of_worker (o);
 }
@@ -3951,14 +3508,10 @@ gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
 // Only returns a valid seg if we can actually find o on the seg.
 heap_segment* seg_mapping_table_segment_of (uint8_t* o)
 {
-#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
-    if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
 #ifdef FEATURE_BASICFREEZE
+    if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
         return ro_segment_lookup (o);
-#else
-        return 0;
 #endif //FEATURE_BASICFREEZE
-#endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
 
     size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
     seg_mapping* entry = &seg_mapping_table[index];
@@ -4008,7 +3561,6 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o)
 
     return seg;
 }
-#endif //SEG_MAPPING_TABLE
 
 size_t gcard_of ( uint8_t*);
 
@@ -4032,10 +3584,8 @@ public:
         return ((ArrayBase *)this)->GetNumComponents();
     }
 
-    void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
+    void Validate(BOOL bDeep=TRUE)
     {
-        UNREFERENCED_PARAMETER(bVerifyNextHeader);
-
         MethodTable * pMT = GetMethodTable();
 
         _ASSERTE(pMT->SanityCheck());
@@ -4078,19 +3628,9 @@ public:
         }
     }
 
-    void ValidatePromote(ScanContext *sc, uint32_t flags)
+    void ValidateHeap(BOOL bDeep)
     {
-        UNREFERENCED_PARAMETER(sc);
-        UNREFERENCED_PARAMETER(flags);
-
-        Validate();
-    }
-
-    void ValidateHeap(Object *from, BOOL bDeep)
-    {
-        UNREFERENCED_PARAMETER(from);
-
-        Validate(bDeep, FALSE);
+        Validate(bDeep);
     }
 
 #endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE
@@ -4703,13 +4243,11 @@ static size_t get_valid_segment_size (BOOL large_seg=FALSE)
             seg_size = initial_seg_size;
     }
 
-#ifdef SEG_MAPPING_TABLE
 #ifdef HOST_64BIT
     seg_size = round_up_power2 (seg_size);
 #else
     seg_size = round_down_power2 (seg_size);
 #endif // HOST_64BIT
-#endif //SEG_MAPPING_TABLE
 
     return (seg_size);
 }
@@ -4772,7 +4310,7 @@ gc_heap::soh_get_segment_to_expand()
     if ((settings.pause_mode != pause_low_latency) &&
         (settings.pause_mode != pause_no_gc)
 #ifdef BACKGROUND_GC
-        && (!recursive_gc_sync::background_running_p())
+        && (!gc_heap::background_running_p())
 #endif //BACKGROUND_GC
         )
     {
@@ -4942,18 +4480,12 @@ gc_heap::get_segment (size_t size, BOOL loh_p)
         }
 #endif //BACKGROUND_GC
 
-#ifdef SEG_MAPPING_TABLE
         if (result)
             seg_mapping_table_add_segment (result, __this);
-#endif //SEG_MAPPING_TABLE
     }
 
     if (!result)
     {
-#ifndef SEG_MAPPING_TABLE
-        if (!seg_table->ensure_space_for_insert ())
-            return 0;
-#endif //SEG_MAPPING_TABLE
         void* mem = virtual_alloc (size);
         if (!mem)
         {
@@ -4999,11 +4531,7 @@ gc_heap::get_segment (size_t size, BOOL loh_p)
 
         if (result)
         {
-#ifdef SEG_MAPPING_TABLE
             seg_mapping_table_add_segment (result, __this);
-#else //SEG_MAPPING_TABLE
-            gc_heap::seg_table->insert ((uint8_t*)result, delta);
-#endif //SEG_MAPPING_TABLE
         }
     }
 
@@ -5110,23 +4638,6 @@ gc_heap::get_uoh_segment (int gen_number, size_t size, BOOL* did_full_compact_gc
     return res;
 }
 
-#if 0
-BOOL gc_heap::unprotect_segment (heap_segment* seg)
-{
-    uint8_t* start = align_lower_page (heap_segment_mem (seg));
-    ptrdiff_t region_size = heap_segment_allocated (seg) - start;
-
-    if (region_size != 0 )
-    {
-        dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
-
-        BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
-        assert (status);
-        return status;
-    }
-    return FALSE;
-}
-#endif
 
 #ifdef MULTIPLE_HEAPS
 #ifdef HOST_X86
@@ -5264,7 +4775,9 @@ public:
 
     static int select_heap(alloc_context* acontext)
     {
+#ifndef TRACE_GC
         UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf
+#endif //TRACE_GC
 
         if (GCToOSInterface::CanGetCurrentProcessorNumber())
         {
@@ -6003,9 +5516,9 @@ bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_numb
                 return true;
         }
     }
-#else
+#else //MULTIPLE_HEAPS && !FEATURE_REDHAWK
     UNREFERENCED_PARAMETER(h_number);
-#endif
+#endif //MULTIPLE_HEAPS && !FEATURE_REDHAWK
 
     //numa aware not enabled, or call failed --> fallback to VirtualCommit()
     return GCToOSInterface::VirtualCommit(addr, size);
@@ -6092,36 +5605,6 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int h_number)
     return decommit_succeeded_p;
 }
 
-#ifndef SEG_MAPPING_TABLE
-inline
-heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p)
-{
-    uint8_t* sadd = add;
-    heap_segment* hs = 0;
-    heap_segment* hs1 = 0;
-    if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
-    {
-        delta = 0;
-        return 0;
-    }
-    //repeat in case there is a concurrent insertion in the table.
-    do
-    {
-        hs = hs1;
-        sadd = add;
-        seg_table->lookup (sadd);
-        hs1 = (heap_segment*)sadd;
-    } while (hs1 && !in_range_for_segment (add, hs1) && (hs != hs1));
-
-    hs = hs1;
-
-    if ((hs == 0) ||
-        (verify_p && (add > heap_segment_reserved ((heap_segment*)(sadd + delta)))))
-        delta = 0;
-    return hs;
-}
-#endif //SEG_MAPPING_TABLE
-
 class mark
 {
 public:
@@ -6362,7 +5845,7 @@ void gc_mechanisms::init_mechanisms()
     elevation_reduced = FALSE;
     found_finalizers = FALSE;
 #ifdef BACKGROUND_GC
-    background_p = recursive_gc_sync::background_running_p() != FALSE;
+    background_p = gc_heap::background_running_p() != FALSE;
     allocations_allowed = TRUE;
 #endif //BACKGROUND_GC
 
@@ -6443,10 +5926,8 @@ void gc_mechanisms::record (gc_history_global* history)
 
 //for_gc_p indicates that the work is being done for GC,
 //as opposed to concurrent heap verification
-void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
+void gc_heap::fix_youngest_allocation_area()
 {
-    UNREFERENCED_PARAMETER(for_gc_p);
-
     // The gen 0 alloc context is never used for allocation in the allocator path. It's
     // still used in the allocation path during GCs.
     assert (generation_allocation_pointer (youngest_generation) == nullptr);
@@ -6454,10 +5935,8 @@ void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
     heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
 }
 
-void gc_heap::fix_uoh_allocation_area (BOOL for_gc_p)
+void gc_heap::fix_uoh_allocation_area()
 {
-    UNREFERENCED_PARAMETER(for_gc_p);
-
     for (int i = uoh_start_generation; i < total_generation_count; i++)
     {
 #ifdef _DEBUG
@@ -6585,8 +6064,8 @@ void gc_heap::fix_allocation_contexts (BOOL for_gc_p)
     args.heap = __this;
 
     GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
-    fix_youngest_allocation_area(for_gc_p);
-    fix_uoh_allocation_area(for_gc_p);
+    fix_youngest_allocation_area();
+    fix_uoh_allocation_area();
 }
 
 void gc_heap::fix_older_allocation_area (generation* older_gen)
@@ -6665,23 +6144,6 @@ void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start)
     set_allocation_heap_segment (gen);
 }
 
-#ifdef BACKGROUND_GC
-//TODO BACKGROUND_GC this is for test only
-void
-gc_heap::disallow_new_allocation (int gen_number)
-{
-    UNREFERENCED_PARAMETER(gen_number);
-    settings.allocations_allowed = FALSE;
-}
-void
-gc_heap::allow_new_allocation (int gen_number)
-{
-    UNREFERENCED_PARAMETER(gen_number);
-    settings.allocations_allowed = TRUE;
-}
-
-#endif //BACKGROUND_GC
-
 bool gc_heap::new_allocation_allowed (int gen_number)
 {
 #ifdef BACKGROUND_GC
@@ -6874,23 +6336,6 @@ void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plu
     }
 }
 
-void gc_heap::set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit)
-{
-    dprintf (3, ("sanp: ptr: %Ix, limit: %Ix", alloc_pointer, alloc_limit));
-    dprintf (3, ("oldest %Id: %Ix", mark_stack_bos, pinned_plug (oldest_pin())));
-    if (!(pinned_plug_que_empty_p()))
-    {
-        mark*  oldest_entry = oldest_pin();
-        uint8_t* plug = pinned_plug (oldest_entry);
-        if ((plug >= alloc_pointer) && (plug < alloc_limit))
-        {
-            alloc_limit = pinned_plug (oldest_entry);
-            dprintf (3, ("now setting alloc context: %Ix->%Ix(%Id)",
-                alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer)));
-        }
-    }
-}
-
 void gc_heap::set_allocator_next_pin (generation* gen)
 {
     dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen)));
@@ -6914,22 +6359,11 @@ void gc_heap::set_allocator_next_pin (generation* gen)
 }
 
 // After we set the info, we increase tos.
-void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
-{
-    UNREFERENCED_PARAMETER(last_pinned_plug);
-
-    mark& m = mark_stack_array[mark_stack_tos];
-    assert (m.first == last_pinned_plug);
-
-    m.len = plug_len;
-    mark_stack_tos++;
-    set_allocator_next_pin (alloc_pointer, alloc_limit);
-}
-
-// After we set the info, we increase tos.
 void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
 {
+#ifndef _DEBUG
     UNREFERENCED_PARAMETER(last_pinned_plug);
+#endif //_DEBUG
 
     mark& m = mark_stack_array[mark_stack_tos];
     assert (m.first == last_pinned_plug);
@@ -7380,9 +6814,9 @@ public:
 
     // mark_array is always at the end of the data structure because we
     // want to be able to make one commit call for everything before it.
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     uint32_t*   mark_array;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     size_t      size;
     uint32_t*   next_card_table;
@@ -7426,9 +6860,7 @@ uint32_t*& card_table_card_bundle_table (uint32_t* c_table)
 }
 #endif //CARD_BUNDLE
 
-#ifdef MARK_ARRAY
-/* Support for mark_array */
-
+#ifdef BACKGROUND_GC
 inline
 uint32_t*& card_table_mark_array (uint32_t* c_table)
 {
@@ -7590,10 +7022,8 @@ void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=T
     if ((end <= current_highest_address) && (from >= current_lowest_address))
     {
         size_t beg_word = mark_word_of (align_on_mark_word (from));
-        MAYBE_UNUSED_VAR(beg_word);
         //align end word to make sure to cover the address
         size_t end_word = mark_word_of (align_on_mark_word (end));
-        MAYBE_UNUSED_VAR(end_word);
         dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
                      (size_t)mark_word_address (beg_word),
                      (size_t)mark_word_address (end_word),
@@ -7633,7 +7063,7 @@ void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=T
 #endif //_DEBUG
     }
 }
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
 //These work on untranslated card tables
 inline
@@ -7725,13 +7155,13 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
 
     size_t bs = size_brick_of (start, end);
     size_t cs = size_card_of (start, end);
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     size_t ms = (gc_can_use_concurrent ?
                  size_mark_array_of (start, end) :
                  0);
 #else
     size_t ms = 0;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     size_t cb = 0;
 
@@ -7758,15 +7188,11 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
     }
 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
 
-#ifdef GROWABLE_SEG_MAPPING_TABLE
     size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
     size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
     size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
 
     st += (st_table_offset_aligned - st_table_offset);
-#else //GROWABLE_SEG_MAPPING_TABLE
-    size_t st = 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
 
     // it is impossible for alloc_size to overflow due bounds on each of
     // its components.
@@ -7814,18 +7240,16 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
     }
 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
 
-#ifdef GROWABLE_SEG_MAPPING_TABLE
     seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
     seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
                                         size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
-#endif //GROWABLE_SEG_MAPPING_TABLE
 
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     if (gc_can_use_concurrent)
         card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
     else
         card_table_mark_array (ct) = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     return translate_card_table(ct);
 }
@@ -7927,13 +7351,13 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
         size_t cs = size_card_of (saved_g_lowest_address, saved_g_highest_address);
         size_t bs = size_brick_of (saved_g_lowest_address, saved_g_highest_address);
 
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
         size_t ms = (gc_heap::gc_can_use_concurrent ?
                     size_mark_array_of (saved_g_lowest_address, saved_g_highest_address) :
                     0);
 #else
         size_t ms = 0;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
         size_t cb = 0;
 
@@ -7964,14 +7388,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
         }
 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
 
-#ifdef GROWABLE_SEG_MAPPING_TABLE
         size_t st = size_seg_mapping_table_of (saved_g_lowest_address, saved_g_highest_address);
         size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
         size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
         st += (st_table_offset_aligned - st_table_offset);
-#else //GROWABLE_SEG_MAPPING_TABLE
-        size_t st = 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
 
         // it is impossible for alloc_size to overflow due bounds on each of
         // its components.
@@ -8028,29 +7448,25 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
         memset(card_table_card_bundle_table (ct), 0xFF, cb);
 #endif //CARD_BUNDLE
 
-#ifdef GROWABLE_SEG_MAPPING_TABLE
-        {
-            new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
-            new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
-                                              size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
-            memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
-                &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
-                size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
+        new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
+        new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
+                                            size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
+        memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+            &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+            size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
 
-            // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
-            // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
-            // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
-            // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
-            // if an OOM occurs.
-        }
-#endif //GROWABLE_SEG_MAPPING_TABLE
+        // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
+        // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
+        // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
+        // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
+        // if an OOM occurs.
 
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
         if(gc_can_use_concurrent)
             card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
         else
             card_table_mark_array (ct) = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
         translated_ct = translate_card_table (ct);
 
@@ -8165,8 +7581,6 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
         return 0;
 
 fail:
-        //cleanup mess and return -1;
-
         if (mem)
         {
             assert(g_gc_card_table == saved_g_card_table);
@@ -8207,7 +7621,6 @@ fail:
 //copy all of the arrays managed by the card table for a page aligned range
 void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
                                      short* old_brick_table,
-                                     heap_segment* seg,
                                      uint8_t* start, uint8_t* end)
 {
     ptrdiff_t brick_offset = brick_of (start) - brick_of (la);
@@ -8230,10 +7643,8 @@ void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
     }
 
     uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
-#ifdef MARK_ARRAY
-#ifdef BACKGROUND_GC
-    UNREFERENCED_PARAMETER(seg);
-    if (recursive_gc_sync::background_running_p())
+
+    if (gc_heap::background_running_p())
     {
         uint32_t* old_mark_array = card_table_mark_array (old_ct);
 
@@ -8261,11 +7672,6 @@ void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
             assert (old_brick_table == 0);
         }
     }
-#else //BACKGROUND_GC
-    assert (seg != 0);
-    clear_mark_array (start, heap_segment_committed(seg));
-#endif //BACKGROUND_GC
-#endif //MARK_ARRAY
 
     // n way merge with all of the card table ever used in between
     uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]);
@@ -8318,13 +7724,13 @@ void gc_heap::init_brick_card_range (heap_segment* seg)
         set_brick (b, -1);
     }
 
-#ifdef MARK_ARRAY
-    if (recursive_gc_sync::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
+#ifdef BACKGROUND_GC
+    if (gc_heap::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
     {
         assert (seg != 0);
         clear_mark_array (heap_segment_mem (seg), heap_segment_committed(seg));
     }
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     clear_card_for_addresses (heap_segment_mem (seg),
                               heap_segment_allocated (seg));
@@ -8332,14 +7738,15 @@ void gc_heap::init_brick_card_range (heap_segment* seg)
 
 void gc_heap::copy_brick_card_table()
 {
-    uint8_t* la = lowest_address;
-    uint8_t* ha = highest_address;
-    MAYBE_UNUSED_VAR(ha);
     uint32_t* old_card_table = card_table;
     short* old_brick_table = brick_table;
 
+    uint8_t* la = lowest_address;
+#ifdef _DEBUG
+    uint8_t* ha = highest_address;
     assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))]));
     assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
+#endif //_DEBUG
 
     /* todo: Need a global lock for this */
     uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
@@ -8351,7 +7758,7 @@ void gc_heap::copy_brick_card_table()
 
     brick_table = card_table_brick_table (ct);
 
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     if (gc_can_use_concurrent)
     {
         mark_array = translate_mark_array (card_table_mark_array (ct));
@@ -8360,19 +7767,9 @@ void gc_heap::copy_brick_card_table()
     }
     else
         mark_array = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
 #ifdef CARD_BUNDLE
-#if defined(MARK_ARRAY) && defined(_DEBUG)
-    size_t cb_end = (size_t)((uint8_t*)card_table_card_bundle_table (ct) + size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address));
-#ifdef GROWABLE_SEG_MAPPING_TABLE
-    size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
-    size_t cb_end_aligned = align_for_seg_mapping_table (cb_end);
-    st += (cb_end_aligned - cb_end);
-#else  //GROWABLE_SEG_MAPPING_TABLE
-    size_t st = 0;
-#endif //GROWABLE_SEG_MAPPING_TABLE
-#endif //MARK_ARRAY && _DEBUG
     card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
 
     // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
@@ -8422,7 +7819,6 @@ void gc_heap::copy_brick_card_table()
                 uint8_t* end = align_on_page (heap_segment_allocated (seg));
                 copy_brick_card_range (la, old_card_table,
                     (i < uoh_start_generation) ? old_brick_table : NULL,
-                    seg,
                     align_lower_page (heap_segment_mem (seg)),
                     end);
             }
@@ -8453,11 +7849,8 @@ BOOL gc_heap::insert_ro_segment (heap_segment* seg)
 
     seg_table->insert (heap_segment_mem(seg), (size_t)seg);
 
-#ifdef SEG_MAPPING_TABLE
     seg_mapping_table_add_ro_segment (seg);
-#endif //SEG_MAPPING_TABLE
 
-    //test if in range
     if ((heap_segment_reserved (seg) > lowest_address) &&
         (heap_segment_mem (seg) < highest_address))
     {
@@ -8476,22 +7869,19 @@ BOOL gc_heap::insert_ro_segment (heap_segment* seg)
 void gc_heap::remove_ro_segment (heap_segment* seg)
 {
 //clear the mark bits so a new segment allocated in its place will have a clear mark bits
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     if (gc_can_use_concurrent)
     {
         clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)),
                       align_on_card_word (min (heap_segment_allocated (seg), highest_address)),
                       false); // read_only segments need the mark clear
     }
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     enter_spin_lock (&gc_heap::gc_lock);
 
     seg_table->remove ((uint8_t*)seg);
-
-#ifdef SEG_MAPPING_TABLE
     seg_mapping_table_remove_ro_segment (seg);
-#endif //SEG_MAPPING_TABLE
 
     // Locate segment (and previous segment) in the list.
     generation* gen2 = generation_of (max_generation);
@@ -8517,12 +7907,8 @@ void gc_heap::remove_ro_segment (heap_segment* seg)
 
 BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg)
 {
-    //set it in range
     seg->flags |= heap_segment_flags_inrange;
-//    init_brick_card_range (seg);
     ro_segments_in_range = TRUE;
-    //right now, segments aren't protected
-    //unprotect_segment (seg);
     return TRUE;
 }
 
@@ -8821,8 +8207,9 @@ void gc_heap::sort_mark_list()
         gc_heap* heap;
         // find the heap x points into - searching cyclically from the last heap,
         // because in many cases the right heap is the next one or comes soon after
+#ifdef _DEBUG
         int last_heap_num = heap_num;
-        MAYBE_UNUSED_VAR(last_heap_num);
+#endif //_DEBUG
         do
         {
             heap_num++;
@@ -9624,7 +9011,7 @@ inline size_t my_get_size (Object* ob)
 #define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
 #endif //COLLECTIBLE_CLASS
 
-#if defined (MARK_ARRAY) && defined (BACKGROUND_GC)
+#ifdef BACKGROUND_GC
 inline
 void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg)
 {
@@ -9700,28 +9087,7 @@ void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
         clear_batch_mark_array_bits (start, end);
     }
 }
-
-void gc_heap::clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p)
-{
-    dprintf (3, ("clearing mark array bits by objects for addr [%Ix,[%Ix",
-                  from, end));
-    int align_const = get_alignment_constant (!loh_p);
-
-    uint8_t* o = from;
-
-    while (o < end)
-    {
-        uint8_t*  next_o = o + Align (size (o), align_const);
-
-        if (background_object_marked (o, TRUE))
-        {
-            dprintf (3, ("%Ix was marked by bgc, is now cleared", o));
-        }
-
-        o = next_o;
-    }
-}
-#endif //MARK_ARRAY && BACKGROUND_GC
+#endif //BACKGROUND_GC
 
 inline
 BOOL gc_heap::is_mark_set (uint8_t* o)
@@ -9779,13 +9145,11 @@ heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h
 {
     size_t initial_commit = SEGMENT_INITIAL_COMMIT;
 
-    //Commit the first page
     if (!virtual_commit (new_pages, initial_commit, h_number))
     {
         return 0;
     }
 
-    //overlay the heap_segment
     heap_segment* new_segment = (heap_segment*)new_pages;
 
     uint8_t* start = new_pages + segment_info_size;
@@ -9837,9 +9201,7 @@ void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
                 decommit_heap_segment (seg);
             }
 
-#ifdef SEG_MAPPING_TABLE
             seg_mapping_table_remove_segment (seg);
-#endif //SEG_MAPPING_TABLE
 
             heap_segment_next (seg) = segment_standby_list;
             segment_standby_list = seg;
@@ -9860,12 +9222,7 @@ void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
         decommit_mark_array_by_seg (seg);
 #endif //BACKGROUND_GC
 
-#ifdef SEG_MAPPING_TABLE
         seg_mapping_table_remove_segment (seg);
-#else //SEG_MAPPING_TABLE
-        seg_table->remove ((uint8_t*)seg);
-#endif //SEG_MAPPING_TABLE
-
         release_segment (seg);
     }
 }
@@ -9947,7 +9304,7 @@ void gc_heap::clear_gen0_bricks()
 #ifdef BACKGROUND_GC
 void gc_heap::rearrange_small_heap_segments()
 {
-    heap_segment* seg = freeable_small_heap_segment;
+    heap_segment* seg = freeable_soh_segment;
     while (seg)
     {
         heap_segment* next_seg = heap_segment_next (seg);
@@ -9955,7 +9312,7 @@ void gc_heap::rearrange_small_heap_segments()
         delete_heap_segment (seg, FALSE);
         seg = next_seg;
     }
-    freeable_small_heap_segment = 0;
+    freeable_soh_segment = 0;
 }
 #endif //BACKGROUND_GC
 
@@ -10039,7 +9396,6 @@ void gc_heap::rearrange_heap_segments(BOOL compacting)
 
 
 #ifdef WRITE_WATCH
-
 uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch
 
 #ifdef CARD_BUNDLE
@@ -10309,17 +9665,17 @@ void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr)
     fire_alloc_wait_event (awr, FALSE);
 }
 #endif //BACKGROUND_GC
-void gc_heap::make_generation (int gen_num, heap_segment* seg, uint8_t* start, uint8_t* pointer)
+void gc_heap::make_generation (int gen_num, heap_segment* seg, uint8_t* start)
 {
     generation* gen = generation_of (gen_num);
 
     gen->gen_num = gen_num;
     gen->allocation_start = start;
-    gen->allocation_context.alloc_ptr = pointer;
-    gen->allocation_context.alloc_limit = pointer;
+    gen->allocation_context.alloc_ptr = 0;
+    gen->allocation_context.alloc_limit = 0;
     gen->allocation_context.alloc_bytes = 0;
     gen->allocation_context.alloc_bytes_uoh = 0;
-    gen->allocation_context_start_region = pointer;
+    gen->allocation_context_start_region = 0;
     gen->start_segment = seg;
     gen->allocation_segment = seg;
     gen->plan_allocation_start = 0;
@@ -10491,19 +9847,6 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
     }
 #endif //GC_CONFIG_DRIVEN
 
-#ifdef GC_STATS
-    GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
-    if (logFileName.Get() != nullptr)
-    {
-        GCStatistics::logFileName = _strdup(logFileName.Get());
-        GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
-        if (!GCStatistics::logFile)
-        {
-            return E_FAIL;
-        }
-    }
-#endif // GC_STATS
-
     HRESULT hres = S_OK;
 
 #ifdef WRITE_WATCH
@@ -10686,17 +10029,12 @@ gc_heap::init_semi_shared()
     }
 #endif //MARK_LIST
 
-#if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
-    if (!seg_mapping_table_init())
-        goto cleanup;
-#endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
-
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
     seg_table = sorted_table::make_sorted_table();
 
     if (!seg_table)
         goto cleanup;
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
 
     segment_standby_list = 0;
 
@@ -10714,7 +10052,6 @@ gc_heap::init_semi_shared()
 
     memset (full_gc_counts, 0, sizeof (full_gc_counts));
 
-    last_gc_index = 0;
     should_expand_in_full_gc = FALSE;
 
 #ifdef FEATURE_LOH_COMPACTION
@@ -11038,8 +10375,8 @@ void gc_heap::add_saved_spinlock_info (
         spinlock_info_index = 0;
     }
 #else
-    MAYBE_UNUSED_VAR(enter_state);
-    MAYBE_UNUSED_VAR(take_state);
+    UNREFERENCED_PARAMETER(enter_state);
+    UNREFERENCED_PARAMETER(take_state);
 #endif //SPINLOCK_HISTORY
 }
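
A compilable sketch of the idiom used above, where a parameter is consumed only when the feature is compiled in and is otherwise silenced in the #else arm. The stand-in macro definition is an assumption for the sketch; the real UNREFERENCED_PARAMETER comes from the platform headers:

    #include <cstdio>

    #ifndef UNREFERENCED_PARAMETER
    #define UNREFERENCED_PARAMETER(P) ((void)(P)) // illustrative stand-in
    #endif

    // Without SPINLOCK_HISTORY the arguments are meaningless, so we touch
    // them with a no-op instead of generating any code for them.
    void record_spinlock(int enter_state, int take_state)
    {
    #ifdef SPINLOCK_HISTORY
        std::printf("enter=%d take=%d\n", enter_state, take_state);
    #else
        UNREFERENCED_PARAMETER(enter_state);
        UNREFERENCED_PARAMETER(take_state);
    #endif
    }

    int main() { record_spinlock(1, 2); }

This builds warning-free with or without -DSPINLOCK_HISTORY, which is the point of keeping the macro call in the branch that does not use the arguments.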
 
@@ -11142,13 +10479,6 @@ gc_heap::init_gc_heap (int  h_number)
     gc_done_event_lock = -1;
     gc_done_event_set = false;
 
-#ifndef SEG_MAPPING_TABLE
-    if (!gc_heap::seg_table->ensure_space_for_insert ())
-    {
-        return 0;
-    }
-#endif //!SEG_MAPPING_TABLE
-
     heap_segment* seg = make_initial_segment (soh_gen0, h_number);
     if (!seg)
         return 0;
@@ -11157,11 +10487,7 @@ gc_heap::init_gc_heap (int  h_number)
                               (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
                               gc_etw_segment_small_object_heap);
 
-#ifdef SEG_MAPPING_TABLE
     seg_mapping_table_add_segment (seg, __this);
-#else //SEG_MAPPING_TABLE
-    seg_table->insert ((uint8_t*)seg, sdelta);
-#endif //SEG_MAPPING_TABLE
 
 #ifdef MULTIPLE_HEAPS
     heap_segment_heap (seg) = this;
@@ -11183,18 +10509,18 @@ gc_heap::init_gc_heap (int  h_number)
             card_table_card_bundle_table (ct));
 #endif //CARD_BUNDLE
 
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     if (gc_can_use_concurrent)
         mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
     else
         mark_array = NULL;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     uint8_t*  start = heap_segment_mem (seg);
 
     for (int i = max_generation; i >= 0; i--)
     {
-        make_generation (i, seg, start, 0);
+        make_generation (i, seg, start);
         start += Align (min_obj_size);
     }
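
All the make_generation call sites visible in this diff passed 0 for the removed pointer argument, so the allocation context now simply starts out empty. A minimal sketch of what the zeroed context implies for the first allocation (simplified, illustrative types):

    #include <cstdint>
    #include <cstdio>

    struct toy_alloc_context
    {
        uint8_t* alloc_ptr   = nullptr; // nothing handed out yet
        uint8_t* alloc_limit = nullptr; // ptr == limit: empty window
    };

    int main()
    {
        toy_alloc_context acx;
        // An empty window cannot satisfy any request, so the first
        // allocation necessarily takes the refill (slow) path.
        std::printf("must refill: %d\n", acx.alloc_ptr == acx.alloc_limit);
    }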
 
@@ -11204,14 +10530,6 @@ gc_heap::init_gc_heap (int  h_number)
 
     ephemeral_heap_segment = seg;
 
-#ifndef SEG_MAPPING_TABLE
-    if (!gc_heap::seg_table->ensure_space_for_insert ())
-    {
-        return 0;
-    }
-#endif //!SEG_MAPPING_TABLE
-
-
      // Create segments for the large and pinned generations
     heap_segment* lseg = make_initial_segment(loh_generation, h_number);
     if (!lseg)
@@ -11233,16 +10551,11 @@ gc_heap::init_gc_heap (int  h_number)
                               (size_t)(heap_segment_reserved (pseg) - heap_segment_mem(pseg)),
                               gc_etw_segment_pinned_object_heap);
 
-#ifdef SEG_MAPPING_TABLE
     seg_mapping_table_add_segment (lseg, __this);
     seg_mapping_table_add_segment (pseg, __this);
-#else //SEG_MAPPING_TABLE
-    seg_table->insert ((uint8_t*)lseg, sdelta);
-    seg_table->insert ((uint8_t*)pseg, sdelta);
-#endif //SEG_MAPPING_TABLE
 
-    make_generation (loh_generation, lseg, heap_segment_mem (lseg), 0);
-    make_generation (poh_generation, pseg, heap_segment_mem (pseg), 0);
+    make_generation (loh_generation, lseg, heap_segment_mem (lseg));
+    make_generation (poh_generation, pseg, heap_segment_mem (pseg));
 
     heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
     heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;
@@ -11271,26 +10584,6 @@ gc_heap::init_gc_heap (int  h_number)
 
 #endif //MULTIPLE_HEAPS
 
-    //Do this only once
-#ifdef MULTIPLE_HEAPS
-    if (h_number == 0)
-#endif //MULTIPLE_HEAPS
-    {
-#ifndef INTERIOR_POINTERS
-        //set the brick_table for large objects
-        //but default value is cleared
-        //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
-        //                   (uint8_t*)heap_segment_reserved (lseg));
-
-#else //INTERIOR_POINTERS
-
-        //Because of the interior pointer business, we have to clear
-        //the whole brick table
-        //but the default value is cleared
-        // clear_brick_table (lowest_address, highest_address);
-#endif //INTERIOR_POINTERS
-    }
-
     if (!init_dynamic_data())
     {
         return 0;
@@ -11322,7 +10615,7 @@ gc_heap::init_gc_heap (int  h_number)
     loh_a_bgc_planning = 0;
     bgc_maxgen_end_fl_size = 0;
 #endif //BGC_SERVO_TUNING
-    freeable_small_heap_segment = 0;
+    freeable_soh_segment = 0;
     gchist_index_per_heap = 0;
     uint8_t** b_arr = new (nothrow) (uint8_t* [MARK_STACK_INITIAL_LENGTH]);
     if (!b_arr)
@@ -11344,15 +10637,6 @@ gc_heap::init_gc_heap (int  h_number)
         );
     }
 
-#ifdef MARK_ARRAY
-    // why would we clear the mark array for this page? it should be cleared..
-    // clear the first committed page
-    //if(gc_can_use_concurrent)
-    //{
-    //    clear_mark_array (align_lower_page (heap_segment_mem (seg)), heap_segment_committed (seg));
-    //}
-#endif //MARK_ARRAY
-
 #ifdef MULTIPLE_HEAPS
     get_proc_and_numa_for_heap (heap_number);
     if (!create_gc_thread ())
@@ -11421,13 +10705,6 @@ gc_heap::init_gc_heap (int  h_number)
     }
 
     bgc_alloc_lock->init();
-
-    if (h_number == 0)
-    {
-        if (!recursive_gc_sync::init())
-            return 0;
-    }
-
     bgc_thread_running = 0;
     bgc_thread = 0;
     bgc_threads_timeout_cs.Initialize();
@@ -11464,15 +10741,13 @@ gc_heap::destroy_semi_shared()
         delete g_mark_list;
 #endif //MARK_LIST
 
-#if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
     if (seg_mapping_table)
         delete seg_mapping_table;
-#endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE
 
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
     //destroy the segment map
     seg_table->delete_sorted_table();
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
 }
 
 void
@@ -11627,12 +10902,6 @@ BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool*
     bool ret = virtual_commit (heap_segment_committed (seg), c_size, heap_number, hard_limit_exceeded_p);
     if (ret)
     {
-#ifdef MARK_ARRAY
-#ifndef BACKGROUND_GC
-        clear_mark_array (heap_segment_committed (seg),
-                        heap_segment_committed (seg)+c_size, TRUE);
-#endif //BACKGROUND_GC
-#endif //MARK_ARRAY
         heap_segment_committed (seg) += c_size;
 
         STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix\n",
@@ -11669,10 +10938,8 @@ int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t*
 }
 
 //used only in older generation allocation (i.e during gc).
-void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
-                            int gennum)
+void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen)
 {
-    UNREFERENCED_PARAMETER(gennum);
     dprintf (3, ("gc Expanding segment allocation"));
     heap_segment* seg = generation_allocation_segment (gen);
     if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
@@ -12140,11 +11407,6 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
         //memory. Let's catch up here
         if (heap_segment_used (seg) < (alloc_allocated - plug_skew))
         {
-#ifdef MARK_ARRAY
-#ifndef BACKGROUND_GC
-            clear_mark_array (heap_segment_used (seg) + plug_skew, alloc_allocated);
-#endif //BACKGROUND_GC
-#endif //MARK_ARRAY
             heap_segment_used (seg) = alloc_allocated - plug_skew;
         }
     }
@@ -12299,11 +11561,9 @@ void gc_heap::add_to_oom_history_per_heap()
     }
 }
 
-void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
+void gc_heap::handle_oom (oom_reason reason, size_t alloc_size,
                           uint8_t* allocated, uint8_t* reserved)
 {
-    UNREFERENCED_PARAMETER(heap_num);
-
     if (reason == oom_budget)
     {
         alloc_size = dd_min_size (dynamic_data_of (0)) / 2;
@@ -12474,7 +11734,7 @@ check_other_factors:
     // by the time the GC happens, we may not be under BGC anymore. If we try to
     // predict often enough it should be ok.
     if ((n == max_generation) &&
-        (recursive_gc_sync::background_running_p()))
+        (gc_heap::background_running_p()))
     {
         n = max_generation - 1;
         dprintf (2, ("FGN: bgc - 1 instead of 2"));
@@ -12583,11 +11843,8 @@ size_t gc_heap::get_full_compact_gc_count()
 // DTREVIEW - we should check this in dt_low_ephemeral_space_p
 // as well.
 inline
-BOOL gc_heap::short_on_end_of_seg (int gen_number,
-                                   heap_segment* seg,
-                                   int align_const)
+BOOL gc_heap::short_on_end_of_seg (heap_segment* seg, int align_const)
 {
-    UNREFERENCED_PARAMETER(gen_number);
     uint8_t* allocated = heap_segment_allocated(seg);
 
     BOOL sufficient_p = sufficient_space_end_seg (allocated,
@@ -13062,7 +12319,7 @@ void gc_heap::wait_for_background (alloc_wait_reason awr, bool loh_p)
 
 void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p)
 {
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         uint32_t memory_load;
         get_memory_info (&memory_load);
@@ -13125,7 +12382,7 @@ BOOL gc_heap::soh_try_fit (int gen_number,
     {
         if (short_seg_end_p)
         {
-            *short_seg_end_p = short_on_end_of_seg (gen_number, ephemeral_heap_segment, align_const);
+            *short_seg_end_p = short_on_end_of_seg (ephemeral_heap_segment, align_const);
         }
         // If the caller doesn't care, we always try to fit at the end of seg;
         // otherwise we would only try if we are actually not short at end of seg.
@@ -13146,7 +12403,7 @@ allocation_state gc_heap::allocate_soh (int gen_number,
                                           int align_const)
 {
 #if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         background_soh_alloc_count++;
         if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0)
@@ -13287,7 +12544,7 @@ allocation_state gc_heap::allocate_soh (int gen_number,
                                                       align_const, &commit_failed_p,
                                                       &short_seg_end_p);
 #ifdef BACKGROUND_GC
-                    bgc_in_progress_p = recursive_gc_sync::background_running_p();
+                    bgc_in_progress_p = gc_heap::background_running_p();
 #endif //BACKGROUND_GC
 
                     if (can_use_existing_p)
@@ -13386,8 +12643,7 @@ exit:
     if (soh_alloc_state == a_state_cant_allocate)
     {
         assert (oom_r != oom_no_failure);
-        handle_oom (heap_number,
-                    oom_r,
+        handle_oom (oom_r,
                     size,
                     heap_segment_allocated (ephemeral_heap_segment),
                     heap_segment_reserved (ephemeral_heap_segment));
@@ -13476,11 +12732,7 @@ int gc_heap::bgc_poh_allocate_spin()
 size_t gc_heap::get_uoh_seg_size (size_t size)
 {
     size_t default_seg_size = min_uoh_segment_size;
-#ifdef SEG_MAPPING_TABLE
     size_t align_size =  default_seg_size;
-#else //SEG_MAPPING_TABLE
-    size_t align_size =  default_seg_size / 2;
-#endif //SEG_MAPPING_TABLE
     int align_const = get_alignment_constant (FALSE);
     size_t large_seg_size = align_on_page (
         max (default_seg_size,
@@ -13491,12 +12743,9 @@ size_t gc_heap::get_uoh_seg_size (size_t size)
 
 BOOL gc_heap::uoh_get_new_seg (int gen_number,
                                size_t size,
-                               int align_const,
                                BOOL* did_full_compact_gc,
                                oom_reason* oom_r)
 {
-    UNREFERENCED_PARAMETER(align_const);
-
     *did_full_compact_gc = FALSE;
 
     size_t seg_size = get_uoh_seg_size (size);
@@ -13550,7 +12799,7 @@ BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
     BOOL bgc_in_progress = FALSE;
     *did_full_compact_gc = FALSE;
 #ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         bgc_in_progress = TRUE;
         size_t last_full_compact_gc_count = get_full_compact_gc_count();
@@ -13583,7 +12832,7 @@ BOOL gc_heap::uoh_try_fit (int gen_number,
                                                 commit_failed_p, oom_r);
 
 #ifdef BACKGROUND_GC
-        if (can_allocate && recursive_gc_sync::background_running_p())
+        if (can_allocate && gc_heap::background_running_p())
         {
             if (gen_number == poh_generation)
             {
@@ -13615,7 +12864,7 @@ BOOL gc_heap::trigger_full_compact_gc (gc_reason gr,
     }
 
 #ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p);
         dprintf (2, ("waited for BGC - done"));
@@ -13713,7 +12962,7 @@ allocation_state gc_heap::allocate_uoh (int gen_number,
 {
 #ifdef BACKGROUND_GC
 
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
 #ifdef BGC_SERVO_TUNING
         bool planning_p = (current_c_gc_state == c_gc_state_planning);
@@ -13863,7 +13112,7 @@ allocation_state gc_heap::allocate_uoh (int gen_number,
 
                 current_full_compact_gc_count = get_full_compact_gc_count();
 
-                can_get_new_seg_p = uoh_get_new_seg (gen_number, size, align_const, &did_full_compacting_gc, &oom_r);
+                can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r);
                 uoh_alloc_state = (can_get_new_seg_p ? 
                                         a_state_try_fit_new_seg : 
                                         (did_full_compacting_gc ? 
@@ -13878,7 +13127,7 @@ allocation_state gc_heap::allocate_uoh (int gen_number,
 
                 current_full_compact_gc_count = get_full_compact_gc_count();
 
-                can_get_new_seg_p = uoh_get_new_seg (gen_number, size, align_const, &did_full_compacting_gc, &oom_r);
+                can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r);
                 // Since we release the msl before we try to allocate a seg, other
                 // threads could have allocated a bunch of segments before us so
                 // we might need to retry.
@@ -13894,7 +13143,7 @@ allocation_state gc_heap::allocate_uoh (int gen_number,
 
                 current_full_compact_gc_count = get_full_compact_gc_count();
 
-                can_get_new_seg_p = uoh_get_new_seg (gen_number, size, align_const, &did_full_compacting_gc, &oom_r); 
+                can_get_new_seg_p = uoh_get_new_seg (gen_number, size, &did_full_compacting_gc, &oom_r); 
                 uoh_alloc_state = (can_get_new_seg_p ? 
                                         a_state_try_fit_new_seg : 
                                         (did_full_compacting_gc ? 
@@ -13972,8 +13221,7 @@ exit:
         }
         else
         {
-            handle_oom (heap_number,
-                        oom_r,
+            handle_oom (oom_r,
                         size,
                         0,
                         0);
@@ -14605,7 +13853,7 @@ CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext, uint32_
 
 void  gc_heap::leave_allocation_segment (generation* gen)
 {
-    adjust_limit (0, 0, gen, max_generation);
+    adjust_limit (0, 0, gen);
 }
 
 void gc_heap::init_free_and_plug()
@@ -14805,7 +14053,7 @@ uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
                     generation_free_list_space (gen) -= free_list_size;
                     remove_gen_free (gen->gen_num, free_list_size);
 
-                    adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
+                    adjust_limit (free_list, free_list_size, gen);
                     generation_allocate_end_seg_p (gen) = FALSE;
                     goto finished;
                 }
@@ -14840,9 +14088,8 @@ uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
             {
                 dprintf (3, ("using what's left in committed"));
                 adjust_limit (heap_segment_plan_allocated (seg),
-                              heap_segment_committed (seg) -
-                              heap_segment_plan_allocated (seg),
-                              gen, from_gen_number+1);
+                              (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)),
+                              gen);
                 generation_allocate_end_seg_p (gen) = TRUE;
                 // dformat (t, 3, "Expanding segment allocation");
                 heap_segment_plan_allocated (seg) =
@@ -14857,9 +14104,8 @@ uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
                 {
                     dprintf (3, ("using what's left in reserved"));
                     adjust_limit (heap_segment_plan_allocated (seg),
-                                  heap_segment_committed (seg) -
-                                  heap_segment_plan_allocated (seg),
-                                  gen, from_gen_number+1);
+                                  (heap_segment_committed (seg) - heap_segment_plan_allocated (seg)),
+                                  gen);
                     generation_allocate_end_seg_p (gen) = TRUE;
                     heap_segment_plan_allocated (seg) =
                         heap_segment_committed (seg);
@@ -15658,7 +14904,7 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
         *blocking_collection_p = TRUE;
     }
 
-    if ((n < max_generation) && !recursive_gc_sync::background_running_p() &&
+    if ((n < max_generation) && !gc_heap::background_running_p() &&
         bgc_tuning::stepping_trigger (settings.entry_memory_load, get_current_gc_index (max_generation)))
     {
         gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_servo_initial);
@@ -15703,25 +14949,7 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
         g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
     {
 #ifndef FEATURE_REDHAWK
-        // for the GC stress mix mode throttle down gen2 collections
-        if (g_pConfig->IsGCStressMix())
-        {
-            size_t current_gc_count = 0;
-
-#ifdef MULTIPLE_HEAPS
-            current_gc_count = (size_t)dd_collection_count (g_heaps[0]->dynamic_data_of (0));
-#else
-            current_gc_count = (size_t)dd_collection_count (dynamic_data_of (0));
-#endif //MULTIPLE_HEAPS
-            // in gc stress, only escalate every 10th non-gen2 collection to a gen2...
-            if ((current_gc_count % 10) == 0)
-            {
-                gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_stress_mix);
-                n = max_generation;
-            }
-        }
-        // for traditional GC stress
-        else if (*blocking_collection_p)
+        if (*blocking_collection_p)
         {
             // We call StressHeap() a lot for Concurrent GC Stress. However,
             // if we can not do a concurrent collection, no need to stress anymore.
@@ -16039,7 +15267,7 @@ int gc_heap::generation_to_condemn (int n_initial,
         temp_gen = n;
 
 #ifdef BACKGROUND_GC
-        if (recursive_gc_sync::background_running_p()
+        if (gc_heap::background_running_p()
 #ifdef BGC_SERVO_TUNING
             || bgc_tuning::fl_tuning_triggered
             || (bgc_tuning::enable_fl_tuning && bgc_tuning::use_stepping_trigger_p)
@@ -16603,20 +15831,19 @@ void fire_overflow_event (uint8_t* overflow_min,
 
 void gc_heap::concurrent_print_time_delta (const char* msg)
 {
-    UNREFERENCED_PARAMETER(msg);
 #ifdef TRACE_GC
     size_t current_time = GetHighPrecisionTimeStamp();
     size_t elapsed_time = current_time - time_bgc_last;
     time_bgc_last = current_time;
 
     dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
+#else
+    UNREFERENCED_PARAMETER(msg);
 #endif //TRACE_GC
 }
 
 void gc_heap::free_list_info (int gen_num, const char* msg)
 {
-    UNREFERENCED_PARAMETER(gen_num);
-    UNREFERENCED_PARAMETER(msg);
 #if defined (BACKGROUND_GC) && defined (TRACE_GC)
     dprintf (3, ("h%d: %s", heap_number, msg));
     for (int i = 0; i < total_generation_count; i++)
@@ -16637,6 +15864,9 @@ void gc_heap::free_list_info (int gen_num, const char* msg)
                 generation_free_obj_space (gen)));
         }
     }
+#else
+    UNREFERENCED_PARAMETER(gen_num);
+    UNREFERENCED_PARAMETER(msg);
 #endif // BACKGROUND_GC && TRACE_GC
 }
 
@@ -16732,10 +15962,6 @@ void gc_heap::gc1()
 
             concurrent_print_time_delta ("BGC");
 
-//#ifdef WRITE_WATCH
-            //reset_write_watch (FALSE);
-//#endif //WRITE_WATCH
-
             concurrent_print_time_delta ("RW");
             background_mark_phase();
             free_list_info (max_generation, "after mark phase");
@@ -16943,12 +16169,6 @@ void gc_heap::gc1()
     }
 #endif // BACKGROUND_GC
 
-#ifdef GC_STATS
-    if (GCStatistics::Enabled() && heap_number == 0)
-        g_GCStatistics.AddGCStats(settings,
-            dd_gc_elapsed_time(dynamic_data_of(settings.condemned_generation)));
-#endif // GC_STATS
-
 #ifdef BACKGROUND_GC
     assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
 #endif //BACKGROUND_GC
@@ -16993,7 +16213,7 @@ void gc_heap::gc1()
                 bgc_threads_sync_event.Wait(INFINITE, FALSE);
                 dprintf (2, ("bgc_threads_sync_event is signalled"));
             }
-#else
+#else //MULTIPLE_HEAPS
             suspend_EE();
 #endif //MULTIPLE_HEAPS
 
@@ -17049,7 +16269,7 @@ void gc_heap::gc1()
                 bgc_threads_sync_event.Wait(INFINITE, FALSE);
                 dprintf (2, ("bgc_threads_sync_event is signalled"));
             }
-#else
+#else //MULTIPLE_HEAPS
             restart_EE();
 #endif //MULTIPLE_HEAPS
 
@@ -17057,7 +16277,7 @@ void gc_heap::gc1()
         }
 #endif //BACKGROUND_GC
     }
-#endif // defined(VERIFY_HEAP) || (defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
+#endif //VERIFY_HEAP || (FEATURE_EVENT_TRACE && BACKGROUND_GC)
 
 #ifdef MULTIPLE_HEAPS
     if (!settings.concurrent)
@@ -17197,7 +16417,7 @@ void gc_heap::gc1()
         heap_select::mark_heap (heap_number);
     }
 
-#else
+#else //MULTIPLE_HEAPS
     gc_data_global.final_youngest_desired =
         dd_desired_allocation (dynamic_data_of (0));
 
@@ -17734,7 +16954,7 @@ BOOL gc_heap::expand_soh_with_minimal_gc()
         for (int i = condemned_gen_number; i >= 0; i--)
         {
             size_t gen_start_size = Align (min_obj_size);
-            make_generation (i, ephemeral_heap_segment, start, 0);
+            make_generation (i, ephemeral_heap_segment, start);
 
             generation* gen = generation_of (i);
             generation_plan_allocation_start (gen) = start;
@@ -17799,15 +17019,14 @@ void gc_heap::allocate_for_no_gc_after_gc()
 #ifdef MULTIPLE_HEAPS
             gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
             if (gc_t_join.joined())
-            {
 #endif //MULTIPLE_HEAPS
-
+            {
                 check_and_set_no_gc_oom();
 
 #ifdef MULTIPLE_HEAPS
                 gc_t_join.restart();
-            }
 #endif //MULTIPLE_HEAPS
+            }
         }
 
         if ((current_no_gc_region_info.start_status == start_no_gc_success) &&
@@ -17888,9 +17107,8 @@ void gc_heap::allocate_for_no_gc_after_gc()
 #ifdef MULTIPLE_HEAPS
     gc_t_join.join(this, gc_join_final_no_gc);
     if (gc_t_join.joined())
-    {
 #endif //MULTIPLE_HEAPS
-
+    {
         check_and_set_no_gc_oom();
 
         if (current_no_gc_region_info.start_status == start_no_gc_success)
@@ -17901,8 +17119,8 @@ void gc_heap::allocate_for_no_gc_after_gc()
 
 #ifdef MULTIPLE_HEAPS
         gc_t_join.restart();
-    }
 #endif //MULTIPLE_HEAPS
+    }
 }
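
The brace discipline adopted for joins keeps a single block that exists in both build flavors: the opening '{' sits outside the #ifdef, and only the join/restart calls stay guarded. A toy showing that the shape compiles either way (toy_join and its members are illustrative stand-ins, not the runtime's join type):

    #include <cstdio>

    struct toy_join
    {
        bool joined()  { return true; }           // pretend we won the join
        void restart() { std::puts("restart"); }  // release the other threads
    };

    toy_join t_join;

    void serialized_section()
    {
    #ifdef MULTIPLE_HEAPS
        if (t_join.joined())
    #endif //MULTIPLE_HEAPS
        {
            std::puts("runs once per join point");

    #ifdef MULTIPLE_HEAPS
            t_join.restart();
    #endif //MULTIPLE_HEAPS
        }
    }

    int main() { serialized_section(); }

With MULTIPLE_HEAPS the block runs only on the thread that wins the join; without it, the same braces delimit a plain unconditional block, so the two configurations can no longer drift apart structurally.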
 
 void gc_heap::init_records()
@@ -18002,9 +17220,8 @@ void gc_heap::garbage_collect (int n)
 #ifdef MULTIPLE_HEAPS
         gc_t_join.join(this, gc_join_minimal_gc);
         if (gc_t_join.joined())
-        {
 #endif //MULTIPLE_HEAPS
-
+        {
 #ifdef MULTIPLE_HEAPS
             // this is serialized because we need to get a segment
             for (int i = 0; i < n_heaps; i++)
@@ -18021,8 +17238,8 @@ void gc_heap::garbage_collect (int n)
 
 #ifdef MULTIPLE_HEAPS
             gc_t_join.restart();
-        }
 #endif //MULTIPLE_HEAPS
+        }
 
         goto done;
     }
@@ -18046,17 +17263,16 @@ void gc_heap::garbage_collect (int n)
     //align all heaps on the max generation to condemn
     dprintf (3, ("Joining for max generation to condemn"));
     condemned_generation_num = generation_to_condemn (n,
-                                                    &blocking_collection,
-                                                    &elevation_requested,
-                                                    FALSE);
+                                                      &blocking_collection,
+                                                      &elevation_requested,
+                                                      FALSE);
     gc_t_join.join(this, gc_join_generation_determined);
     if (gc_t_join.joined())
 #endif //MULTIPLE_HEAPS
     {
-#if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
-        //delete old slots from the segment table
+#ifdef FEATURE_BASICFREEZE
         seg_table->delete_old_slots();
-#endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
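
Why the previous guard was dead: with SEG_MAPPING_TABLE unconditionally defined, !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE) held in no configuration, so delete_old_slots() was unreachable even in FEATURE_BASICFREEZE builds, the only ones that still create a seg_table (see init_semi_shared above). A short compile-time demonstration:

    #define SEG_MAPPING_TABLE 1 // effectively always defined

    #if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
    #error "unreachable: the old guard could never be satisfied"
    #endif

    int main() {} // builds in every configuration; the branch is provably dead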
 
 #ifdef MULTIPLE_HEAPS
         for (int i = 0; i < n_heaps; i++)
@@ -18069,7 +17285,7 @@ void gc_heap::garbage_collect (int n)
             hp->rearrange_uoh_segments();
 #ifdef BACKGROUND_GC
             hp->background_delay_delete_uoh_segments();
-            if (!recursive_gc_sync::background_running_p())
+            if (!gc_heap::background_running_p())
                 hp->rearrange_small_heap_segments();
 #endif //BACKGROUND_GC
         }
@@ -18080,7 +17296,7 @@ void gc_heap::garbage_collect (int n)
         rearrange_uoh_segments();
 #ifdef BACKGROUND_GC
         background_delay_delete_uoh_segments();
-        if (!recursive_gc_sync::background_running_p())
+        if (!gc_heap::background_running_p())
             rearrange_small_heap_segments();
 #endif //BACKGROUND_GC
 #endif //MULTIPLE_HEAPS
@@ -18139,7 +17355,7 @@ void gc_heap::garbage_collect (int n)
 
 #ifdef BACKGROUND_GC
         if ((settings.condemned_generation == max_generation) &&
-            (recursive_gc_sync::background_running_p()))
+            (gc_heap::background_running_p()))
         {
             //TODO BACKGROUND_GC If we just wait for the end of gc, it won't work
             // because we have to collect 0 and 1 properly
@@ -18171,7 +17387,6 @@ void gc_heap::garbage_collect (int n)
 #endif //MULTIPLE_HEAPS
 
         // Call the EE for start of GC work
-        // just one thread for MP GC
         GCToEEInterface::GcStartWork (settings.condemned_generation,
                                 max_generation);
 
@@ -18183,7 +17398,6 @@ void gc_heap::garbage_collect (int n)
 
 #ifdef MULTIPLE_HEAPS
         gc_start_event.Reset();
-        //start all threads on the roots.
         dprintf(3, ("Starting all gc threads for gc"));
         gc_t_join.restart();
 #endif //MULTIPLE_HEAPS
@@ -18242,7 +17456,7 @@ void gc_heap::garbage_collect (int n)
             for (int i = 0; i < n_heaps; i++)
             {
                 gc_heap* hp = g_heaps[i];
-                if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init (hp->mark_array))
+                if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init())
                 {
                     do_concurrent_p = FALSE;
                     break;
@@ -18254,7 +17468,7 @@ void gc_heap::garbage_collect (int n)
                 }
             }
 #else
-            do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init (mark_array));
+            do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init());
             if (do_concurrent_p)
             {
                 background_saved_lowest_address = lowest_address;
@@ -18277,8 +17491,6 @@ void gc_heap::garbage_collect (int n)
 
                 int gen = check_for_ephemeral_alloc();
                 // always do a gen1 GC before we start BGC.
-                // This is temporary for testing purpose.
-                //int gen = max_generation - 1;
                 dont_restart_ee_p = TRUE;
                 if (gen == -1)
                 {
@@ -18401,10 +17613,8 @@ size_t& gc_heap::promoted_bytes(int thread)
 #endif //MULTIPLE_HEAPS
 }
 
-#ifdef INTERIOR_POINTERS
 heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
 {
-#ifdef SEG_MAPPING_TABLE
     heap_segment* seg = seg_mapping_table_segment_of (interior);
     if (seg)
     {
@@ -18412,86 +17622,7 @@ heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_
             return 0;
     }
     return seg;
-#else //SEG_MAPPING_TABLE
-#ifdef MULTIPLE_HEAPS
-    for (int i = 0; i < gc_heap::n_heaps; i++)
-    {
-        gc_heap* h = gc_heap::g_heaps [i];
-        hs = h->find_segment_per_heap (o, small_segment_only_p);
-        if (hs)
-        {
-            break;
-        }
-    }
-#else
-    {
-        gc_heap* h = pGenGCHeap;
-        hs = h->find_segment_per_heap (o, small_segment_only_p);
-    }
-#endif //MULTIPLE_HEAPS
-#endif //SEG_MAPPING_TABLE
-}
-
-heap_segment* gc_heap::find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p)
-{
-#ifdef SEG_MAPPING_TABLE
-    return find_segment (interior, small_segment_only_p);
-#else //SEG_MAPPING_TABLE
-    if (in_range_for_segment (interior, ephemeral_heap_segment))
-    {
-        return ephemeral_heap_segment;
-    }
-    else
-    {
-        heap_segment* found_seg = 0;
-
-        {
-            heap_segment* seg = generation_start_segment (generation_of (max_generation));
-            do
-            {
-                if (in_range_for_segment (interior, seg))
-                {
-                    found_seg = seg;
-                    goto end_find_segment;
-                }
-
-            } while ((seg = heap_segment_next (seg)) != 0);
-        }
-        if (!small_segment_only_p)
-        {
-#ifdef BACKGROUND_GC
-            {
-                ptrdiff_t delta = 0;
-                heap_segment* seg = segment_of (interior, delta);
-                if (seg && in_range_for_segment (interior, seg))
-                {
-                    found_seg = seg;
-                }
-                goto end_find_segment;
-            }
-#else //BACKGROUND_GC
-            for (int i = uoh_start_generation; i < total_generation_count; i++)
-            {
-                heap_segment* seg = generation_start_segment (generation_of (i));
-                do
-                {
-                    if (in_range_for_segment(interior, seg))
-                    {
-                        found_seg = seg;
-                        goto end_find_segment;
-                    }
-
-                } while ((seg = heap_segment_next (seg)) != 0);
-            }
-#endif //BACKGROUND_GC
-        }
-end_find_segment:
-
-        return found_seg;
-    }
-#endif //SEG_MAPPING_TABLE
 }
-#endif //INTERIOR_POINTERS
 
 #if !defined(_DEBUG) && !defined(__GNUC__)
 inline // This causes link errors if global optimization is off
@@ -18501,14 +17632,8 @@ gc_heap* gc_heap::heap_of (uint8_t* o)
 #ifdef MULTIPLE_HEAPS
     if (o == 0)
         return g_heaps [0];
-#ifdef SEG_MAPPING_TABLE
     gc_heap* hp = seg_mapping_table_heap_of (o);
     return (hp ? hp : g_heaps[0]);
-#else //SEG_MAPPING_TABLE
-    ptrdiff_t delta = 0;
-    heap_segment* seg = segment_of (o, delta);
-    return (seg ? heap_segment_heap (seg) : g_heaps [0]);
-#endif //SEG_MAPPING_TABLE
 #else //MULTIPLE_HEAPS
     UNREFERENCED_PARAMETER(o);
     return __this;
@@ -18521,21 +17646,14 @@ gc_heap* gc_heap::heap_of_gc (uint8_t* o)
 #ifdef MULTIPLE_HEAPS
     if (o == 0)
         return g_heaps [0];
-#ifdef SEG_MAPPING_TABLE
     gc_heap* hp = seg_mapping_table_heap_of_gc (o);
     return (hp ? hp : g_heaps[0]);
-#else //SEG_MAPPING_TABLE
-    ptrdiff_t delta = 0;
-    heap_segment* seg = segment_of (o, delta);
-    return (seg ? heap_segment_heap (seg) : g_heaps [0]);
-#endif //SEG_MAPPING_TABLE
 #else //MULTIPLE_HEAPS
     UNREFERENCED_PARAMETER(o);
     return __this;
 #endif //MULTIPLE_HEAPS
 }
 
-#ifdef INTERIOR_POINTERS
 // will find all heap objects (large and small)
 uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
 {
@@ -18560,8 +17678,8 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
     int brick_entry = get_brick_entry(brick_of (interior));
     if (brick_entry == 0)
     {
-        // this is a pointer to a large object
-        heap_segment* seg = find_segment_per_heap (interior, FALSE);
+        // this is a pointer to a UOH object
+        heap_segment* seg = find_segment (interior, FALSE);
         if (seg
 #ifdef FEATURE_CONSERVATIVE_GC
             && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
@@ -18575,7 +17693,6 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
                                                        || (GCConfig::GetConservativeGC() && !heap_segment_uoh_p (seg))
 #endif
                                                       );
-            //int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
             assert (interior < heap_segment_allocated (seg));
 
             uint8_t* o = heap_segment_mem (seg);
@@ -18596,7 +17713,7 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
     }
     else if (interior >= low)
     {
-        heap_segment* seg = find_segment_per_heap (interior, TRUE);
+        heap_segment* seg = find_segment (interior, TRUE);
         if (seg)
         {
 #ifdef FEATURE_CONSERVATIVE_GC
@@ -18615,86 +17732,6 @@ uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
         return 0;
 }
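
The UOH branch above locates the enclosing object by walking the segment from its base, object by object. A self-contained toy of that walk, with a fake size-prefixed object standing in for Align (size (o)) (all names here are illustrative):

    #include <cstdint>
    #include <cstdio>

    struct toy_obj { uint32_t size; }; // object size, header included

    uint8_t* find_containing(uint8_t* mem, uint8_t* allocated, uint8_t* interior)
    {
        for (uint8_t* o = mem; o < allocated; )
        {
            uint8_t* next = o + ((toy_obj*)o)->size;
            if (interior >= o && interior < next)
                return o;  // enclosing object found
            o = next;
        }
        return nullptr;    // interior lands past the last object
    }

    int main()
    {
        alignas(8) uint8_t seg[64] = {};
        ((toy_obj*)seg)->size = 24;        // first object:  [0, 24)
        ((toy_obj*)(seg + 24))->size = 40; // second object: [24, 64)
        uint8_t* o = find_containing(seg, seg + 64, seg + 30);
        std::printf("enclosing object at offset %td\n", o - seg); // prints 24
    }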
 
-uint8_t*
-gc_heap::find_object_for_relocation (uint8_t* interior, uint8_t* low, uint8_t* high)
-{
-    uint8_t* old_address = interior;
-    if (!((old_address >= low) && (old_address < high)))
-        return 0;
-    uint8_t* plug = 0;
-    size_t  brick = brick_of (old_address);
-    int    brick_entry =  brick_table [ brick ];
-    if (brick_entry != 0)
-    {
-    retry:
-        {
-            while (brick_entry < 0)
-            {
-                brick = (brick + brick_entry);
-                brick_entry =  brick_table [ brick ];
-            }
-            uint8_t* old_loc = old_address;
-            uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
-                                      old_loc);
-            if (node <= old_loc)
-                plug = node;
-            else
-            {
-                brick = brick - 1;
-                brick_entry =  brick_table [ brick ];
-                goto retry;
-            }
-
-        }
-        assert (plug);
-        //find the object by going along the plug
-        uint8_t* o = plug;
-        while (o <= interior)
-        {
-            uint8_t* next_o = o + Align (size (o));
-            assert (next_o > o);
-            if (next_o > interior)
-            {
-                break;
-            }
-            o = next_o;
-        }
-        assert ((o <= interior) && ((o + Align (size (o))) > interior));
-        return o;
-    }
-    else
-    {
-        // this is a pointer to a large object
-        heap_segment* seg = find_segment_per_heap (interior, FALSE);
-        if (seg)
-        {
-            assert (interior < heap_segment_allocated (seg));
-
-            uint8_t* o = heap_segment_mem (seg);
-            while (o < heap_segment_allocated (seg))
-            {
-                uint8_t* next_o = o + Align (size (o));
-                assert (next_o > o);
-                if ((o < interior) && (interior < next_o))
-                return o;
-                o = next_o;
-            }
-            return 0;
-        }
-        else
-            {
-            return 0;
-        }
-    }
-}
-#else //INTERIOR_POINTERS
-inline
-uint8_t* gc_heap::find_object (uint8_t* o, uint8_t* low)
-{
-    return o;
-}
-#endif //INTERIOR_POINTERS
-
 #ifdef MULTIPLE_HEAPS
 
 #ifdef MARK_LIST
@@ -18745,7 +17782,6 @@ BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high)
 #ifdef MULTIPLE_HEAPS
     else if (o)
     {
-        //find the heap
         gc_heap* hp = heap_of_gc (o);
         assert (hp);
         if ((o >= hp->gc_low) && (o < hp->gc_high))
@@ -18802,7 +17838,6 @@ BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high)
 #ifdef MULTIPLE_HEAPS
     else if (o)
     {
-        //find the heap
         gc_heap* hp = heap_of (o);
         assert (hp);
         if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address))
@@ -18997,7 +18032,9 @@ void gc_heap::enque_pinned_plug (uint8_t* plug,
 
 void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
 {
+#ifndef _DEBUG
     UNREFERENCED_PARAMETER(last_pinned_plug);
+#endif //_DEBUG
 
     mark& m = mark_stack_array[mark_stack_tos - 1];
     assert (last_pinned_plug == m.first);
@@ -19740,7 +18777,7 @@ gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
 }
 
 inline
-uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
+void gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
 {
     if ((o >= gc_low) && (o < gc_high))
         mark_object_simple (&o THREAD_NUMBER_ARG);
@@ -19754,8 +18791,6 @@ uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
             mark_object_simple (&o THREAD_NUMBER_ARG);
     }
 #endif //MULTIPLE_HEAPS
-
-    return o;
 }
 
 #ifdef BACKGROUND_GC
@@ -20023,27 +19058,6 @@ uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL)
     return o;
 }
 
-void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
-{
-    UNREFERENCED_PARAMETER(sc);
-
-    assert (settings.concurrent);
-    uint8_t* o = (uint8_t*)object;
-
-    gc_heap* hp = gc_heap::heap_of (o);
-#ifdef INTERIOR_POINTERS
-    if (flags & GC_CALL_INTERIOR)
-    {
-        o = hp->find_object (o, background_saved_lowest_address);
-    }
-#endif //INTERIOR_POINTERS
-
-    if (!background_object_marked (o, FALSE))
-    {
-        FATAL_GC_ERROR();
-    }
-}
-
 void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags)
 {
     UNREFERENCED_PARAMETER(sc);
@@ -20076,14 +19090,12 @@ void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t f
         return;
     }
 
-#ifdef INTERIOR_POINTERS
     if (flags & GC_CALL_INTERIOR)
     {
         o = hp->find_object (o, hp->background_saved_lowest_address);
         if (o == 0)
             return;
     }
-#endif //INTERIOR_POINTERS
 
 #ifdef FEATURE_CONSERVATIVE_GC
     // For conservative GC, a value on stack may point to middle of a free object.
@@ -20864,7 +19876,6 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
                 }
             }
 
-            // Restart all the workers.
             dprintf(3, ("Starting all gc thread mark stack overflow processing"));
             gc_t_join.restart();
         }
@@ -20887,7 +19898,6 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
         gc_t_join.join(this, gc_join_rescan_dependent_handles);
         if (gc_t_join.joined())
         {
-            // Restart all the workers.
             dprintf(3, ("Starting all gc thread for dependent handle promotion"));
             gc_t_join.restart();
         }
@@ -21013,9 +20023,8 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
 #ifdef MULTIPLE_HEAPS
     gc_t_join.join(this, gc_join_begin_mark_phase);
     if (gc_t_join.joined())
-    {
 #endif //MULTIPLE_HEAPS
-
+    {
         maxgen_size_inc_p = false;
 
         num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
@@ -21043,11 +20052,10 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
 #endif //MH_SC_MARK
 
         gc_t_join.restart();
-    }
 #endif //MULTIPLE_HEAPS
+    }
 
     {
-
 #ifdef MARK_LIST
         //set up the mark lists from g_mark_list
         assert (g_mark_list);
@@ -21071,8 +20079,6 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
         slow  = MAX_PTR;
 #endif //MULTIPLE_HEAPS
 
-        //%type%  category = quote (mark);
-
         if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
         {
             GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
@@ -21099,7 +20105,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
         last_promoted_bytes = promoted_bytes (heap_number);
 
 #ifdef BACKGROUND_GC
-        if (recursive_gc_sync::background_running_p())
+        if (gc_heap::background_running_p())
         {
             scan_background_roots (GCHeap::Promote, heap_number, &sc);
         }
@@ -21126,9 +20132,9 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
 
 #ifdef TRACE_GC
         size_t promoted_before_cards = promoted_bytes (heap_number);
+        dprintf (3, ("before cards: %Id", promoted_before_cards));
 #endif //TRACE_GC
 
-        dprintf (3, ("before cards: %Id", promoted_before_cards));
         if (!full_p)
         {
 #ifdef CARD_BUNDLE
@@ -21266,11 +20272,9 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
             gc_t_join.r_init();
         }
 
-        //start all threads on the roots.
         dprintf(3, ("Starting all gc thread for short weak handle scan"));
         gc_t_join.restart();
 #endif //MULTIPLE_HEAPS
-
     }
 
 #ifdef FEATURE_CARD_MARKING_STEALING
@@ -21285,15 +20289,11 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
     dprintf(3, ("Joining for finalization"));
     gc_t_join.join(this, gc_join_scan_finalization);
     if (gc_t_join.joined())
-#endif //MULTIPLE_HEAPS
-
     {
-#ifdef MULTIPLE_HEAPS
-        //start all threads on the roots.
         dprintf(3, ("Starting all gc thread for Finalization"));
         gc_t_join.restart();
-#endif //MULTIPLE_HEAPS
     }
+#endif //MULTIPLE_HEAPS
 
     //Handle finalization.
     size_t promoted_bytes_live = promoted_bytes (heap_number);
@@ -21314,7 +20314,6 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
     gc_t_join.join(this, gc_join_null_dead_long_weak);
     if (gc_t_join.joined())
     {
-        //start all threads on the roots.
         dprintf(3, ("Starting all gc thread for weak pointer deletion"));
         gc_t_join.restart();
     }
@@ -21342,13 +20341,10 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
         GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
 
 #ifdef MULTIPLE_HEAPS
-
-#ifdef MARK_LIST
-#ifndef PARALLEL_MARK_LIST_SORT
+#if defined(MARK_LIST) && !defined(PARALLEL_MARK_LIST_SORT)
         //compact g_mark_list and sort it.
         combine_mark_lists();
-#endif //PARALLEL_MARK_LIST_SORT
-#endif //MARK_LIST
+#endif //MARK_LIST && !PARALLEL_MARK_LIST_SORT
 
         //decide on promotion
         if (!settings.promotion)
@@ -21430,7 +20426,6 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
         }
 #endif //SNOOP_STATS
 
-        //start all threads.
         dprintf(3, ("Starting all threads for end of mark phase"));
         gc_t_join.restart();
 #else //MULTIPLE_HEAPS
@@ -21458,7 +20453,6 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
                 settings.promotion = TRUE;
             }
         }
-
 #endif //MULTIPLE_HEAPS
     }
 
@@ -21700,7 +20694,6 @@ void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
     ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
     //clear the left bit and the relocation field
     *place &= 1;
-    // store the value
     *place |= val;
 }
 
@@ -22144,8 +21137,6 @@ void gc_heap::seg_clear_mark_bits (heap_segment* seg)
 void gc_heap::sweep_ro_segments (heap_segment* start_seg)
 {
     //go through all of the segment in range and reset the mark bit
-    //TODO works only on small object segments
-
     heap_segment* seg = start_seg;
 
     while (seg)
@@ -22163,18 +21154,7 @@ void gc_heap::sweep_ro_segments (heap_segment* start_seg)
                 seg_clear_mark_bits (seg);
             }
 #else //BACKGROUND_GC
-
-#ifdef MARK_ARRAY
-            if(gc_can_use_concurrent)
-            {
-                clear_mark_array (max (heap_segment_mem (seg), lowest_address),
-                              min (heap_segment_allocated (seg), highest_address),
-                              FALSE); // read_only segments need the mark clear
-            }
-#else //MARK_ARRAY
             seg_clear_mark_bits (seg);
-#endif //MARK_ARRAY
-
 #endif //BACKGROUND_GC
         }
         seg = heap_segment_next (seg);
@@ -22260,10 +21240,8 @@ BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* allo
     return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) +  size) <= alloc_limit);
 }
 
-uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
+uint8_t* gc_heap::loh_allocate_in_condemned (size_t size)
 {
-    UNREFERENCED_PARAMETER(old_loc);
-
     generation* gen = large_object_generation;
     dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id",
         generation_allocation_pointer (gen),
@@ -22503,7 +21481,7 @@ BOOL gc_heap::plan_loh()
             }
             else
             {
-                new_address = loh_allocate_in_condemned (o, size);
+                new_address = loh_allocate_in_condemned (size);
             }
 
             loh_set_node_relocation_distance (o, (new_address - o));
@@ -22705,11 +21683,6 @@ void gc_heap::relocate_in_loh_compact()
     //Skip the generation gap object
     o = o + AlignQword (size (o));
 
-    relocate_args args;
-    args.low = gc_low;
-    args.high = gc_high;
-    args.last_plug = 0;
-
     while (1)
     {
         if (o >= heap_segment_allocated (seg))
@@ -22934,8 +21907,6 @@ void gc_heap::plan_phase (int condemned_gen_number)
 
     assert (settings.concurrent == FALSE);
 
-    // %type%  category = quote (plan);
-
     dprintf (2,("---- Plan Phase ---- Condemned generation %d, promotion: %d",
                 condemned_gen_number, settings.promotion ? 1 : 0));
 
@@ -22958,7 +21929,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
     if ((condemned_gen_number < max_generation) &&
         (mark_list_index <= mark_list_end)
 #ifdef BACKGROUND_GC
-        && (!recursive_gc_sync::background_running_p())
+        && (!gc_heap::background_running_p())
 #endif //BACKGROUND_GC
         )
     {
@@ -23661,7 +22632,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
 #ifdef BACKGROUND_GC
                 if (current_c_gc_state == c_gc_state_marking)
                 {
-                    assert (recursive_gc_sync::background_running_p());
+                    assert (gc_heap::background_running_p());
                     while ((xl < end) && !marked (xl))
                     {
                         dprintf (4, ("-%Ix-", (size_t)xl));
@@ -24092,12 +23063,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
             }
         }
 
-        //start all threads on the roots.
         dprintf(3, ("Starting all gc threads after compaction decision"));
         gc_t_join.restart();
     }
 
-    //reset the local variable accordingly
     should_compact = (gc_policy >= policy_compact);
     should_expand  = (gc_policy >= policy_expand);
 
@@ -24258,14 +23227,12 @@ void gc_heap::plan_phase (int condemned_gen_number)
             dprintf(3, ("Joining after end of compaction"));
             gc_t_join.join(this, gc_join_adjust_handle_age_compact);
             if (gc_t_join.joined())
-#endif //MULTIPLE_HEAPS
             {
-#ifdef MULTIPLE_HEAPS
                 //join all threads to make sure they are synchronized
                 dprintf(3, ("Restarting after Promotion granted"));
                 gc_t_join.restart();
-#endif //MULTIPLE_HEAPS
             }
+#endif //MULTIPLE_HEAPS
 
             ScanContext sc;
             sc.thread_number = heap_number;
@@ -24379,9 +23346,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
         if (settings.promotion && !settings.demotion)
         {
             uint8_t* start = generation_allocation_start (youngest_generation);
-            MAYBE_UNUSED_VAR(start);
+#ifdef _DEBUG
             assert (heap_segment_allocated (ephemeral_heap_segment) ==
                     (start + Align (size (start))));
+#endif //_DEBUG
         }
     }
     else
@@ -24484,7 +23452,9 @@ Called after compact phase to fix all generation gaps
 void gc_heap::fix_generation_bounds (int condemned_gen_number,
                                      generation* consing_gen)
 {
+#ifndef _DEBUG
     UNREFERENCED_PARAMETER(consing_gen);
+#endif //_DEBUG
 
     assert (generation_allocation_segment (consing_gen) ==
             ephemeral_heap_segment);
@@ -24515,11 +23485,7 @@ void gc_heap::fix_generation_bounds (int condemned_gen_number,
         // new ephemeral generations will be empty and there'll be no need to set cards for the
         // old ephemeral generations that got promoted into max_generation.
         ptrdiff_t delta = 0;
-#ifdef SEG_MAPPING_TABLE
         heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]);
-#else //SEG_MAPPING_TABLE
-        heap_segment* old_ephemeral_seg = segment_of (saved_ephemeral_plan_start[max_generation-1], delta);
-#endif //SEG_MAPPING_TABLE
 
         assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg));
         size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg)));
@@ -24534,14 +23500,14 @@ void gc_heap::fix_generation_bounds (int condemned_gen_number,
     {
         alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment);
         //reset the allocated size
+#ifdef _DEBUG
         uint8_t* start = generation_allocation_start (youngest_generation);
-        MAYBE_UNUSED_VAR(start);
         if (settings.promotion && !settings.demotion)
         {
             assert ((start + Align (size (start))) ==
                     heap_segment_plan_allocated(ephemeral_heap_segment));
         }
-
+#endif //_DEBUG
         heap_segment_allocated(ephemeral_heap_segment)=
             heap_segment_plan_allocated(ephemeral_heap_segment);
     }
@@ -24632,7 +23598,6 @@ void gc_heap::make_free_lists (int condemned_gen_number)
                 {
                     //We had an empty segment
                     //need to allocate the generation start
-
                     generation* gen = generation_of (max_generation);
 
                     heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
@@ -24979,27 +23944,11 @@ uint8_t* tree_search (uint8_t* tree, uint8_t* old_address)
 #ifdef FEATURE_BASICFREEZE
 bool gc_heap::frozen_object_p (Object* obj)
 {
-#ifdef MULTIPLE_HEAPS
-#ifdef SEG_MAPPING_TABLE
-    heap_segment* pSegment = seg_mapping_table_segment_of((uint8_t*)obj);
-#else
-    ptrdiff_t delta = 0;
-    heap_segment* pSegment = segment_of ((uint8_t*)obj, delta);
-#endif
-#else //MULTIPLE_HEAPS
-    heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)obj, FALSE);
-    _ASSERTE(pSegment);
-#endif //MULTIPLE_HEAPS
-
-    return heap_segment_read_only_p(pSegment);
+    heap_segment* seg = seg_mapping_table_segment_of ((uint8_t*)obj);
+    return heap_segment_read_only_p (seg);
 }
 #endif // FEATURE_BASICFREEZE
 
-#ifdef FEATURE_REDHAWK
-// TODO: this was added on RH, we have not done perf runs to see if this is the right
-// thing to do for other versions of the CLR.
-inline
-#endif // FEATURE_REDHAWK
 void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
 {
     uint8_t* old_address = *pold_address;
@@ -25061,18 +24010,7 @@ void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
 #ifdef FEATURE_LOH_COMPACTION
     if (loh_compacted_p)
     {
-#ifdef MULTIPLE_HEAPS
-#ifdef SEG_MAPPING_TABLE
         heap_segment* pSegment = seg_mapping_table_segment_of ((uint8_t*)old_address);
-#else
-        ptrdiff_t delta = 0;
-        heap_segment* pSegment = segment_of ((uint8_t*)old_address, delta);
-#endif
-#else //MULTIPLE_HEAPS
-        heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)old_address, FALSE);
-        _ASSERTE(pSegment);
-#endif //MULTIPLE_HEAPS
-
         size_t flags = pSegment->flags;
         if ((flags & heap_segment_flags_loh)
 #ifdef FEATURE_BASICFREEZE
@@ -25334,8 +24272,7 @@ void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end)
 // if we expanded, right now we are not handling it as We are not saving the new reloc info.
 void gc_heap::verify_pins_with_post_plug_info (const char* msg)
 {
-    UNREFERENCED_PARAMETER(msg);
-#if defined  (_DEBUG) && defined (VERIFY_HEAP)
+#if defined (_DEBUG) && defined (VERIFY_HEAP)
     if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
     {
         if (!verify_pinned_queue_p)
@@ -25387,6 +24324,8 @@ void gc_heap::verify_pins_with_post_plug_info (const char* msg)
 
         dprintf (3, ("%s verified", msg));
     }
+#else
+    UNREFERENCED_PARAMETER(msg);
 #endif // _DEBUG && VERIFY_HEAP
 }
 
@@ -25802,7 +24741,6 @@ void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type t
 #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
 void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
 {
-    // This should only be called for BGCs
     assert(settings.concurrent);
 
     for (int i = max_generation; i < total_generation_count; i++)
@@ -25851,7 +24789,7 @@ void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn
         }
     }
 }
-#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
 
 void gc_heap::relocate_phase (int condemned_gen_number,
                               uint8_t* first_condemned_address)
@@ -25861,24 +24799,20 @@ void gc_heap::relocate_phase (int condemned_gen_number,
     sc.promotion = FALSE;
     sc.concurrent = FALSE;
 
-//  %type%  category = quote (relocate);
-    dprintf (2,("---- Relocate phase -----"));
-
 #ifdef MULTIPLE_HEAPS
     //join all threads to make sure they are synchronized
     dprintf(3, ("Joining after end of plan"));
     gc_t_join.join(this, gc_join_begin_relocate_phase);
     if (gc_t_join.joined())
-#endif //MULTIPLE_HEAPS
-
     {
-#ifdef MULTIPLE_HEAPS
 
         //join all threads to make sure they are synchronized
         dprintf(3, ("Restarting for relocation"));
         gc_t_join.restart();
-#endif //MULTIPLE_HEAPS
     }
+#endif //MULTIPLE_HEAPS
+
+    dprintf (2,("---- Relocate phase -----"));
 
     dprintf(3,("Relocating roots"));
     GCScan::GcScanRoots(GCHeap::Relocate,
@@ -25887,7 +24821,7 @@ void gc_heap::relocate_phase (int condemned_gen_number,
     verify_pins_with_post_plug_info("after reloc stack");
 
 #ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         scan_background_roots (GCHeap::Relocate, heap_number, &sc);
     }
@@ -26024,12 +24958,6 @@ void gc_heap::relocate_phase (int condemned_gen_number,
     }
 #endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
 
-#ifdef MULTIPLE_HEAPS
-    //join all threads to make sure they are synchronized
-    dprintf(3, ("Joining after end of relocation"));
-    gc_t_join.join(this, gc_join_relocate_phase_done);
-#endif //MULTIPLE_HEAPS
-
     dprintf(2,( "---- End of Relocate phase ----"));
 }
 
@@ -26385,12 +25313,20 @@ void gc_heap::compact_phase (int condemned_gen_number,
                              uint8_t*  first_condemned_address,
                              BOOL clear_cards)
 {
-//  %type%  category = quote (compact);
+#ifdef MULTIPLE_HEAPS
+    dprintf(3, ("Joining after end of relocation"));
+    gc_t_join.join(this, gc_join_relocate_phase_done);
+    if (gc_t_join.joined())
+    {
+        dprintf(3, ("Restarting for compaction"));
+        gc_t_join.restart();
+    }
+#endif //MULTIPLE_HEAPS
+
     generation*   condemned_gen = generation_of (condemned_gen_number);
     uint8_t*  start_address = first_condemned_address;
     size_t   current_brick = brick_of (start_address);
     heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
-
     PREFIX_ASSUME(current_heap_segment != NULL);
 
     reset_pinned_queue_bos();
@@ -26424,20 +25360,6 @@ void gc_heap::compact_phase (int condemned_gen_number,
     dprintf (2,("---- Compact Phase: %Ix(%Ix)----",
         first_condemned_address, brick_of (first_condemned_address)));
 
-#ifdef MULTIPLE_HEAPS
-    //restart
-    if (gc_t_join.joined())
-    {
-#endif //MULTIPLE_HEAPS
-
-#ifdef MULTIPLE_HEAPS
-        dprintf(3, ("Restarting for compaction"));
-        gc_t_join.restart();
-    }
-#endif //MULTIPLE_HEAPS
-
-    reset_pinned_queue_bos();
-
 #ifdef FEATURE_LOH_COMPACTION
     if (loh_compacted_p)
     {
@@ -26554,15 +25476,11 @@ void gc_heap::bgc_thread_stub (void* arg)
 #pragma warning(pop)
 #endif //_MSC_VER
 
-#endif //BACKGROUND_GC
-
-/*------------------ Background GC ----------------------------*/
-
-#ifdef BACKGROUND_GC
-
 void gc_heap::background_drain_mark_list (int thread)
 {
+#ifndef MULTIPLE_HEAPS
     UNREFERENCED_PARAMETER(thread);
+#endif //!MULTIPLE_HEAPS
 
     size_t saved_c_mark_list_index = c_mark_list_index;
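The #ifndef wrapper added here is deliberate: thread is consumed only by the MULTIPLE_HEAPS parts of the drain loop, so the UNREFERENCED_PARAMETER is compiled only in the single-heap build, where the parameter would otherwise draw an unused-argument warning. A reduced sketch of the idiom; the function body and macro are stand-ins, not the real gc.cpp code:

    #include <cstdio>

    #ifndef UNREFERENCED_PARAMETER
    #define UNREFERENCED_PARAMETER(P) (void)(P)
    #endif

    void background_drain_sketch (int thread)
    {
    #ifndef MULTIPLE_HEAPS
        // only the single-heap build leaves 'thread' unused; suppressing it
        // unconditionally could mask a multi-heap path that forgot to use it
        UNREFERENCED_PARAMETER(thread);
    #else
        std::printf ("draining c_mark_list on heap %d\n", thread);
    #endif //!MULTIPLE_HEAPS
    }

    int main ()
    {
        background_drain_sketch (0);
        return 0;
    }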
 
@@ -26665,7 +25583,6 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
                 }
             }
 
-            // Restart all the workers.
             dprintf(2, ("Starting all gc thread mark stack overflow processing"));
             bgc_t_join.restart();
         }
@@ -26688,7 +25605,6 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
         bgc_t_join.join(this, gc_join_rescan_dependent_handles);
         if (bgc_t_join.joined())
         {
-            // Restart all the workers.
             dprintf(3, ("Starting all gc thread for dependent handle promotion"));
             bgc_t_join.restart();
         }
@@ -26734,7 +25650,7 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
 
 void gc_heap::recover_bgc_settings()
 {
-    if ((settings.condemned_generation < max_generation) && recursive_gc_sync::background_running_p())
+    if ((settings.condemned_generation < max_generation) && gc_heap::background_running_p())
     {
         dprintf (2, ("restoring bgc settings"));
         settings = saved_bgc_settings;
@@ -26759,7 +25675,7 @@ void gc_heap::allow_fgc()
 
 BOOL gc_heap::should_commit_mark_array()
 {
-    return (recursive_gc_sync::background_running_p() || (current_bgc_state == bgc_initialized));
+    return (gc_heap::background_running_p() || (current_bgc_state == bgc_initialized));
 }
 
 void gc_heap::clear_commit_flag()
@@ -26820,18 +25736,11 @@ void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t*
 #endif //_DEBUG
 }
 
-void gc_heap::verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr)
-{
-    verify_mark_array_cleared (heap_segment_mem (seg), heap_segment_reserved (seg), mark_array_addr);
-}
-
 BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
                                          heap_segment* seg,
                                          uint32_t* new_card_table,
                                          uint8_t* new_lowest_address)
 {
-    UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced
-
     uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
     uint8_t* end = heap_segment_reserved (seg);
 
@@ -26970,10 +25879,8 @@ BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_
     return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr);
 }
 
-BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
+BOOL gc_heap::commit_mark_array_bgc_init()
 {
-    UNREFERENCED_PARAMETER(mark_array_addr);
-
     dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix",
                             lowest_address, highest_address, mark_array));
 
@@ -27257,7 +26164,6 @@ void gc_heap::background_mark_phase ()
             // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset
             // can be done while the runtime is suspended or after the runtime is restarted, the preference was to reset while
             // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below.
-#ifdef WRITE_WATCH
             concurrent_print_time_delta ("CRWW begin");
 
 #ifdef MULTIPLE_HEAPS
@@ -27270,7 +26176,6 @@ void gc_heap::background_mark_phase ()
 #endif //MULTIPLE_HEAPS
 
             concurrent_print_time_delta ("CRWW");
-#endif //WRITE_WATCH
 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
 
             num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
@@ -27281,7 +26186,6 @@ void gc_heap::background_mark_phase ()
             dprintf (GTC_LOG, ("setting cm_in_progress"));
             c_write (cm_in_progress, TRUE);
 
-            //restart all thread, doing the marking from the array
             assert (dont_restart_ee_p);
             dont_restart_ee_p = FALSE;
 
@@ -27488,15 +26392,17 @@ void gc_heap::background_mark_phase ()
 #ifdef MULTIPLE_HEAPS
         bgc_t_join.join(this, gc_join_after_absorb);
         if (bgc_t_join.joined())
+#endif //MULTIPLE_HEAPS
         {
 #ifdef BGC_SERVO_TUNING
             bgc_tuning::record_bgc_sweep_start();
 #endif //BGC_SERVO_TUNING
 
+#ifdef MULTIPLE_HEAPS
             dprintf(3, ("Joining BGC threads after absorb"));
             bgc_t_join.restart();
-        }
 #endif //MULTIPLE_HEAPS
+        }
 
         // give VM a chance to do work
         GCToEEInterface::GcBeforeBGCSweepWork();
@@ -27518,9 +26424,7 @@ void gc_heap::background_mark_phase ()
         //concurrent_print_time_delta ("nonconcurrent marking stack roots");
         concurrent_print_time_delta ("NRS");
 
-//        finalize_queue->EnterFinalizeLock();
         finalize_queue->GcScanRoots(background_promote, heap_number, 0);
-//        finalize_queue->LeaveFinalizeLock();
 
         dprintf (2, ("nonconcurrent marking handle table"));
         GCScan::GcScanHandles(background_promote,
@@ -27600,7 +26504,6 @@ void gc_heap::background_mark_phase ()
         }
 #endif //MULTIPLE_HEAPS
 
-        //Handle finalization.
         dprintf(3,("Marking finalization data"));
         //concurrent_print_time_delta ("bgc joined to mark finalization");
         concurrent_print_time_delta ("NRj");
@@ -27680,7 +26583,6 @@ void gc_heap::background_mark_phase ()
 
         if (heap_segment_allocated (seg) == heap_segment_mem (seg))
         {
-            // This can't happen...
             FATAL_GC_ERROR();
         }
 
@@ -27782,14 +26684,11 @@ inline uint8_t* gc_heap::high_page ( heap_segment* seg, BOOL concurrent_p)
 void gc_heap::revisit_written_page (uint8_t* page,
                                     uint8_t* end,
                                     BOOL concurrent_p,
-                                    heap_segment* seg,
                                     uint8_t*& last_page,
                                     uint8_t*& last_object,
                                     BOOL large_objects_p,
                                     size_t& num_marked_objects)
 {
-    UNREFERENCED_PARAMETER(seg);
-
     uint8_t*   start_address = page;
     uint8_t*   o             = 0;
     int align_const = get_alignment_constant (!large_objects_p);
@@ -27954,7 +26853,6 @@ end_limit:
 // allocated out of range all live.
 void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
 {
-#ifdef WRITE_WATCH
     if (concurrent_p && !reset_only_p)
     {
         current_bgc_state = bgc_revisit_soh;
@@ -28069,9 +26967,9 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
                                 {
                                     //search for marked objects in the page
                                     revisit_written_page (page, high_address, concurrent_p,
-                                                        seg, last_page, last_object,
-                                                        !small_object_segments,
-                                                        total_marked_objects);
+                                                          last_page, last_object,
+                                                          !small_object_segments,
+                                                          total_marked_objects);
                                 }
                                 else
                                 {
@@ -28101,21 +26999,18 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
             if (!reset_only_p)
             {
                 dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
-                fire_revisit_event (total_dirtied_pages, total_marked_objects, /* large_objects_p */ FALSE);
+                fire_revisit_event (total_dirtied_pages, total_marked_objects, FALSE);
                 concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH");
                 total_dirtied_pages = 0;
                 total_marked_objects = 0;
             }
 
-            //switch to large segment
             if (concurrent_p && !reset_only_p)
             {
                 current_bgc_state = bgc_revisit_uoh;
             }
 
             small_object_segments = FALSE;
-            //concurrent_print_time_delta (concurrent_p ? "concurrent marking dirtied pages on SOH" : "nonconcurrent marking dirtied pages on SOH");
-
             dprintf (3, ("now revisiting large object segments"));
         }
         else
@@ -28127,12 +27022,10 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
             else
             {
                 dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
-                fire_revisit_event (total_dirtied_pages, total_marked_objects, /* large_objects_p */ TRUE);
+                fire_revisit_event (total_dirtied_pages, total_marked_objects, TRUE);
             }
         }
     }
-
-#endif //WRITE_WATCH
 }
 
 void gc_heap::background_grow_c_mark_list()
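The #ifdef WRITE_WATCH pairs deleted from background_mark_phase and revisit_written_pages could never be false: both functions live in BACKGROUND_GC-only code, and the gcpriv.h hunk later in this diff defines WRITE_WATCH whenever BACKGROUND_GC is defined. A compile-time restatement of that implication:

    // mirrors the gcpriv.h rule shown later in this diff
    #if defined(BACKGROUND_GC) || defined(CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
    #define WRITE_WATCH
    #endif

    // inside BACKGROUND_GC-only code the nested check was therefore redundant
    #if defined(BACKGROUND_GC) && !defined(WRITE_WATCH)
    #error "unreachable: BACKGROUND_GC implies WRITE_WATCH"
    #endif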
@@ -28205,14 +27098,12 @@ void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc,
         return;
     }
 
-#ifdef INTERIOR_POINTERS
     if (flags & GC_CALL_INTERIOR)
     {
         o = hp->find_object (o, hp->background_saved_lowest_address);
         if (o == 0)
             return;
     }
-#endif //INTERIOR_POINTERS
 
 #ifdef FEATURE_CONSERVATIVE_GC
     // For conservative GC, a value on stack may point to middle of a free object.
@@ -28445,7 +27336,6 @@ void gc_heap::kill_gc_thread()
     bgc_start_event.CloseEvent();
     bgc_threads_timeout_cs.Destroy();
     bgc_thread = 0;
-    recursive_gc_sync::shutdown();
 }
 
 void gc_heap::bgc_thread_function()
@@ -28518,7 +27408,7 @@ void gc_heap::bgc_thread_function()
             dprintf (3, ("no concurrent GC needed, exiting"));
             break;
         }
-        recursive_gc_sync::begin_background();
+        gc_background_running = TRUE;
         dprintf (2, ("beginning of bgc: gen2 FL: %d, FO: %d, frag: %d",
             generation_free_list_space (generation_of (max_generation)),
             generation_free_obj_space (generation_of (max_generation)),
@@ -28577,7 +27467,7 @@ void gc_heap::bgc_thread_function()
 #endif //MULTIPLE_HEAPS
 
             c_write (settings.concurrent, FALSE);
-            recursive_gc_sync::end_background();
+            gc_background_running = FALSE;
             keep_bgc_threads_p = FALSE;
             background_gc_done_event.Set();
 
@@ -28659,7 +27549,7 @@ bool gc_heap::bgc_tuning::should_trigger_bgc_loh()
         gc_heap* hp = pGenGCHeap;
 #endif //MULTIPLE_HEAPS
 
-        if (!(recursive_gc_sync::background_running_p()))
+        if (!(gc_heap::background_running_p()))
         {
             size_t current_alloc = get_total_servo_alloc (loh_generation);
             tuning_calculation* current_gen_calc = &gen_calc[loh_generation - max_generation];
@@ -28690,7 +27580,7 @@ bool gc_heap::bgc_tuning::should_trigger_bgc_loh()
 
 bool gc_heap::bgc_tuning::should_trigger_bgc()
 {
-    if (!bgc_tuning::enable_fl_tuning || recursive_gc_sync::background_running_p())
+    if (!bgc_tuning::enable_fl_tuning || gc_heap::background_running_p())
     {
         return false;
     }
@@ -29940,7 +28830,6 @@ uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object)
     size_t curr_cl = (size_t)next_o / brick_size;
     size_t min_cl = (size_t)first_object / brick_size;
 
-    //dprintf (3,( "Looking for intersection with %Ix from %Ix", (size_t)start, (size_t)o));
 #ifdef TRACE_GC
     unsigned int n_o = 1;
 #endif //TRACE_GC
@@ -30176,11 +29065,9 @@ uint8_t* compute_next_end (heap_segment* seg, uint8_t* low)
 }
 
 uint8_t*
-gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
+gc_heap::compute_next_boundary (int gen_number,
                                 BOOL relocating)
 {
-    UNREFERENCED_PARAMETER(low);
-
     //when relocating, the fault line is the plan start of the younger
     //generation because the generation is promoted.
     if (relocating && (gen_number == (settings.condemned_generation + 1)))
@@ -30202,7 +29089,6 @@ inline void
 gc_heap::keep_card_live (uint8_t* o, size_t& n_gen,
                          size_t& cg_pointers_found)
 {
-    THREAD_FROM_HEAP;
     if ((gc_low <= o) && (gc_high > o))
     {
         n_gen++;
@@ -30456,7 +29342,7 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating CARD_
     generation*   oldest_gen        = generation_of (max_generation);
     int           curr_gen_number   = max_generation;
     uint8_t*      gen_boundary      = generation_allocation_start(generation_of(curr_gen_number - 1));
-    uint8_t*      next_boundary     = compute_next_boundary(gc_low, curr_gen_number, relocating);
+    uint8_t*      next_boundary     = compute_next_boundary(curr_gen_number, relocating);
 
     heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));
     PREFIX_ASSUME(seg != NULL);
@@ -30604,7 +29490,7 @@ void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating CARD_
                     gen_boundary = generation_allocation_start
                         (generation_of (curr_gen_number - 1));
                     next_boundary = (compute_next_boundary
-                                     (low, curr_gen_number, relocating));
+                                     (curr_gen_number, relocating));
                 }
 
                 dprintf (4, ("|%Ix|", (size_t)o));
@@ -31371,11 +30257,11 @@ adjust:
 }
 
 BOOL gc_heap::process_free_space (heap_segment* seg,
-                         size_t free_space,
-                         size_t min_free_size,
-                         size_t min_cont_size,
-                         size_t* total_free_space,
-                         size_t* largest_free_space)
+                                  size_t free_space,
+                                  size_t min_free_size,
+                                  size_t min_cont_size,
+                                  size_t* total_free_space,
+                                  size_t* largest_free_space)
 {
     *total_free_space += free_space;
     *largest_free_space = max (*largest_free_space, free_space);
@@ -31658,7 +30544,6 @@ next:
         }
 
        //tally up free space
-
         while (1)
         {
             while (free_list)
@@ -32032,7 +30917,9 @@ generation* gc_heap::expand_heap (int condemned_generation,
                                   generation* consing_gen,
                                   heap_segment* new_heap_segment)
 {
+#ifndef _DEBUG
     UNREFERENCED_PARAMETER(condemned_generation);
+#endif //!_DEBUG
     assert (condemned_generation >= (max_generation -1));
     unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
     uint8_t*  start_address = generation_limit (max_generation);
@@ -32040,7 +30927,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
     BOOL should_promote_ephemeral = FALSE;
     ptrdiff_t eph_size = total_ephemeral_size;
 #ifdef BACKGROUND_GC
-    dprintf(2,("%s: ---- Heap Expansion ----", (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")));
+    dprintf(2,("%s: ---- Heap Expansion ----", (gc_heap::background_running_p() ? "FGC" : "NGC")));
 #endif //BACKGROUND_GC
     settings.heap_expansion = TRUE;
 
@@ -32216,12 +31103,10 @@ generation* gc_heap::expand_heap (int condemned_generation,
         get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
 #endif //!MULTIPLE_HEAPS
     }
-    ptrdiff_t eph_size1 = total_ephemeral_size;
-    MAYBE_UNUSED_VAR(eph_size1);
 
     if (!should_promote_ephemeral && new_segment_p)
     {
-        assert (eph_size1 <= eph_size);
+        assert ((ptrdiff_t)total_ephemeral_size <= eph_size);
     }
 
     if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg))
@@ -32829,7 +31714,6 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
     dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
     dd_new_allocation (dd) = dd_gc_new_allocation (dd);
 
-    //update counter
     dd_promoted_size (dd) = out;
     if (gen_number == max_generation)
     {
@@ -32862,7 +31746,6 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
                 end_poh_size = total_gen_size;
 
 #endif //BACKGROUND_GC
-            //update counter
             dd_promoted_size (dd) = out;
         }
     }
@@ -33047,26 +31930,9 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
         fragmentation, (int)(fragmentation_burden * 100.0)));
 
 #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
-    // for pure GC stress runs we need compaction, for GC stress "mix"
-    // we need to ensure a better mix of compacting and sweeping collections
-    if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent
-        && !g_pConfig->IsGCStressMix())
+    // for GC stress runs we need compaction
+    if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent)
         should_compact = TRUE;
-
-#ifdef GC_STATS
-    // in GC stress "mix" mode, for stress induced collections make sure we
-    // keep sweeps and compactions relatively balanced. do not (yet) force sweeps
-    // against the GC's determination, as it may lead to premature OOMs.
-    if (g_pConfig->IsGCStressMix() && settings.stress_induced)
-    {
-        int compactions = g_GCStatistics.cntCompactFGC+g_GCStatistics.cntCompactNGC;
-        int sweeps = g_GCStatistics.cntFGC + g_GCStatistics.cntNGC - compactions;
-        if (compactions < sweeps / 10)
-        {
-            should_compact = TRUE;
-        }
-    }
-#endif // GC_STATS
 #endif //defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
 
     if (GCConfig::GetForceCompact())
@@ -33477,18 +32343,6 @@ CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int g
 #pragma inline_depth(20)
 #endif //_MSC_VER
 
-#ifdef MARK_ARRAY
-    uint8_t* current_lowest_address = lowest_address;
-    uint8_t* current_highest_address = highest_address;
-#ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
-    {
-        current_lowest_address = background_saved_lowest_address;
-        current_highest_address = background_saved_highest_address;
-    }
-#endif //BACKGROUND_GC
-#endif // MARK_ARRAY
-
 #ifdef FEATURE_LOH_COMPACTION
     // The GC allocator made a free object already in this alloc context and
     // adjusted the alloc_ptr accordingly.
@@ -33501,9 +32355,12 @@ CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int g
 
     CObjectHeader* obj = (CObjectHeader*)result;
 
-#ifdef MARK_ARRAY
-    if (recursive_gc_sync::background_running_p())
+#ifdef BACKGROUND_GC
+    if (gc_heap::background_running_p())
     {
+        uint8_t* current_lowest_address = background_saved_lowest_address;
+        uint8_t* current_highest_address = background_saved_highest_address;
+
         if ((result < current_highest_address) && (result >= current_lowest_address))
         {
             dprintf (3, ("Clearing mark bit at address %Ix",
@@ -33511,7 +32368,6 @@ CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int g
 
             mark_array_clear_marked (result);
         }
-#ifdef BACKGROUND_GC
         if (current_c_gc_state != c_gc_state_free)
         {
             dprintf (3, ("Concurrent allocation of a large object %Ix",
@@ -33525,9 +32381,8 @@ CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int g
                 mark_array_set_marked (result);
             }
         }
-#endif //BACKGROUND_GC
     }
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
     assert (obj != 0);
     assert ((size_t)obj == Align ((size_t)obj, align_const));
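With the mark-array code now guarded by BACKGROUND_GC directly, allocate_uoh_object only consults the saved BGC range while a background GC is in flight: a new object inside [background_saved_lowest_address, background_saved_highest_address) gets its mark bit cleared, and during the concurrent phases it may be marked right away so the sweeping BGC keeps it alive. A reduced sketch of the bounds check; the range and the bitmap hook are local stand-ins:

    #include <cstdint>
    #include <cstdio>

    static uint8_t* background_saved_lowest_address;
    static uint8_t* background_saved_highest_address;

    static void mark_array_clear_marked (uint8_t* o)
    {
        std::printf ("clearing mark bit for %p\n", (void*)o);
    }

    void on_uoh_alloc_during_bgc_sketch (uint8_t* result)
    {
        // only addresses inside the range saved at BGC start have
        // mark-array coverage; anything newer is left alone
        if ((result >= background_saved_lowest_address) &&
            (result < background_saved_highest_address))
        {
            mark_array_clear_marked (result);
        }
    }

    int main ()
    {
        static uint8_t heap[64];
        background_saved_lowest_address = heap;
        background_saved_highest_address = heap + sizeof (heap);
        on_uoh_alloc_during_bgc_sketch (heap + 8);
        return 0;
    }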
@@ -33752,8 +32607,8 @@ void gc_heap::generation_delete_heap_segment (generation* gen,
         heap_segment_next (next_seg) = prev_seg;
 
         dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg));
-        heap_segment_next (seg) = freeable_small_heap_segment;
-        freeable_small_heap_segment = seg;
+        heap_segment_next (seg) = freeable_soh_segment;
+        freeable_soh_segment = seg;
     }
 
     decommit_heap_segment (seg);
@@ -34119,13 +32974,11 @@ void gc_heap::background_sweep()
 #ifdef MULTIPLE_HEAPS
     bgc_t_join.join(this, gc_join_restart_ee);
     if (bgc_t_join.joined())
-#endif //MULTIPLE_HEAPS
     {
-#ifdef MULTIPLE_HEAPS
         dprintf(2, ("Starting BGC threads for resuming EE"));
         bgc_t_join.restart();
-#endif //MULTIPLE_HEAPS
     }
+#endif //MULTIPLE_HEAPS
 
     if (heap_number == 0)
     {
@@ -34853,20 +33706,6 @@ go_through_refs:
     }
 }
 
-void gc_heap::descr_segment (heap_segment* seg )
-{
-#ifdef TRACE_GC
-    uint8_t*  x = heap_segment_mem (seg);
-    while (x < heap_segment_allocated (seg))
-    {
-        dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
-        x = x + Align(size (x));
-    }
-#else // TRACE_GC
-    UNREFERENCED_PARAMETER(seg);
-#endif // TRACE_GC
-}
-
 void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
 {
 #ifdef MULTIPLE_HEAPS
@@ -34993,7 +33832,10 @@ void gc_heap::print_free_list (int gen, heap_segment* seg)
 
 void gc_heap::descr_generations (BOOL begin_gc_p)
 {
+#ifndef TRACE_GC
     UNREFERENCED_PARAMETER(begin_gc_p);
+#endif //!TRACE_GC
+
 #ifdef STRESS_LOG
     if (StressLog::StressLogOn(LF_GC, LL_INFO10))
     {
@@ -35112,42 +33954,21 @@ void gc_heap::descr_generations (BOOL begin_gc_p)
 //
 //-----------------------------------------------------------------------------
 
-
-#ifdef TRACE_GC
-
- unsigned int PromotedObjectCount  = 0;
- unsigned int CreatedObjectCount   = 0;
- unsigned int AllocCount           = 0;
- unsigned int AllocBigCount        = 0;
- unsigned int AllocSmallCount      = 0;
- int64_t      AllocStart           = 0;
- int64_t      AllocDuration        = 0;
-#endif //TRACE_GC
-
 //Static member variables.
 VOLATILE(BOOL)    GCHeap::GcInProgress            = FALSE;
-//GCTODO
-//CMCSafeLock*      GCHeap::fGcLock;
-GCEvent            *GCHeap::WaitForGCEvent         = NULL;
-//GCTODO
-#if defined(TRACE_GC) && !defined(MULTIPLE_HEAPS)
-uint64_t            GCHeap::GcDuration;
-#endif //TRACE_GC
-unsigned            GCHeap::GcCondemnedGeneration   = 0;
-size_t              GCHeap::totalSurvivedSize       = 0;
+GCEvent           *GCHeap::WaitForGCEvent         = NULL;
+unsigned          GCHeap::GcCondemnedGeneration   = 0;
+size_t            GCHeap::totalSurvivedSize       = 0;
 #ifdef FEATURE_PREMORTEM_FINALIZATION
-CFinalize*          GCHeap::m_Finalize              = 0;
-BOOL                GCHeap::GcCollectClasses        = FALSE;
-VOLATILE(int32_t)      GCHeap::m_GCFLock               = 0;
+CFinalize*        GCHeap::m_Finalize              = 0;
+BOOL              GCHeap::GcCollectClasses        = FALSE;
+VOLATILE(int32_t) GCHeap::m_GCFLock               = 0;
 
 #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
 #ifdef STRESS_HEAP
-#ifdef BACKGROUND_GC
-int                 GCHeap::gc_stress_fgcs_in_bgc   = 0;
-#endif // BACKGROUND_GC
 #ifndef MULTIPLE_HEAPS
-OBJECTHANDLE        GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
-int                 GCHeap::m_CurStressObj          = 0;
+OBJECTHANDLE      GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
+int               GCHeap::m_CurStressObj          = 0;
 #endif // !MULTIPLE_HEAPS
 #endif // STRESS_HEAP
 #endif // FEATURE_REDHAWK
@@ -35242,8 +34063,9 @@ BOOL gc_heap::bgc_mark_array_range (heap_segment* seg,
 
 void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
 {
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
-    if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
+#ifdef VERIFY_HEAP
+    if (gc_heap::background_running_p() &&
+        (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
     {
         uint8_t* range_beg = 0;
         uint8_t* range_end = 0;
@@ -35270,12 +34092,12 @@ void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
             }
         }
     }
-#endif //VERIFY_HEAP && MARK_ARRAY
+#endif //VERIFY_HEAP
 }
 
 void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
 {
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
+#ifdef VERIFY_HEAP
     size_t start_mark_bit = mark_bit_of (obj) + 1;
     size_t end_mark_bit = mark_bit_of (obj + s);
     unsigned int startbit = mark_bit_bit (start_mark_bit);
@@ -35327,15 +34149,11 @@ void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
             FATAL_GC_ERROR();
         }
     }
-#endif //VERIFY_HEAP && MARK_ARRAY
+#endif //VERIFY_HEAP
 }
 
 void gc_heap::clear_all_mark_array()
 {
-#ifdef MARK_ARRAY
-    //size_t num_dwords_written = 0;
-    //size_t begin_time = GetHighPrecisionTimeStamp();
-
     for (int i = max_generation; i < total_generation_count; i++)
     {
         generation* gen = generation_of (i);
@@ -35384,40 +34202,13 @@ void gc_heap::clear_all_mark_array()
             seg = heap_segment_next_rw (seg);
         }
     }
-
-    //size_t end_time = GetHighPrecisionTimeStamp() - begin_time;
-
-    //printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
-
-#endif //MARK_ARRAY
 }
 
-#endif //BACKGROUND_GC
-
-void gc_heap::verify_mark_array_cleared (heap_segment* seg)
+void gc_heap::verify_mark_array_cleared()
 {
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
-    assert (card_table == g_gc_card_table);
-    size_t  markw = mark_word_of (heap_segment_mem (seg));
-    size_t  markw_end = mark_word_of (heap_segment_reserved (seg));
-
-    while (markw < markw_end)
-    {
-        if (mark_array [markw])
-        {
-            dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
-                            markw, mark_array [markw], mark_word_address (markw)));
-            FATAL_GC_ERROR();
-        }
-        markw++;
-    }
-#endif //VERIFY_HEAP && MARK_ARRAY
-}
-
-void gc_heap::verify_mark_array_cleared ()
-{
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
-    if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
+#ifdef VERIFY_HEAP
+    if (gc_heap::background_running_p() &&
+        (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
     {
         for (int i = max_generation; i < total_generation_count; i++)
         {
@@ -35431,56 +34222,9 @@ void gc_heap::verify_mark_array_cleared ()
             }
         }
     }
-#endif //VERIFY_HEAP && MARK_ARRAY
-}
-
-void gc_heap::verify_seg_end_mark_array_cleared()
-{
-#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
-    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
-    {
-        for (int i = max_generation; i < total_generation_count; i++)
-        {
-            generation* gen = generation_of (i);
-            heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
-
-            while (seg)
-            {
-                // We already cleared all mark array bits for ephemeral generations
-                // at the beginning of bgc sweep
-                uint8_t* from = ((seg == ephemeral_heap_segment) ?
-                    generation_allocation_start (generation_of (max_generation - 1)) :
-                    heap_segment_allocated (seg));
-                size_t  markw = mark_word_of (align_on_mark_word (from));
-                size_t  markw_end = mark_word_of (heap_segment_reserved (seg));
-
-                while (from < mark_word_address (markw))
-                {
-                    if (is_mark_bit_set (from))
-                    {
-                        dprintf (3, ("mark bit for %Ix was not cleared", from));
-                        FATAL_GC_ERROR();
-                    }
-
-                    from += mark_bit_pitch;
-                }
-
-                while (markw < markw_end)
-                {
-                    if (mark_array [markw])
-                    {
-                        dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
-                            markw, mark_array [markw], mark_word_address (markw)));
-                        FATAL_GC_ERROR();
-                    }
-                    markw++;
-                }
-                seg = heap_segment_next_rw (seg);
-            }
-        }
-    }
-#endif //VERIFY_HEAP && MARK_ARRAY
+#endif //VERIFY_HEAP
 }
+#endif //BACKGROUND_GC
 
 // This function is called to make sure we don't mess up the segment list
 // in SOH. It's called by:
@@ -35512,10 +34256,9 @@ void gc_heap::verify_soh_segment_list()
 // sweep.
 // NOTE - to be able to call this function during background sweep, we need to temporarily
 // NOT clear the mark array bits as we go.
-void gc_heap::verify_partial ()
-{
 #ifdef BACKGROUND_GC
-
+void gc_heap::verify_partial()
+{
     // Different ways to fail.
     BOOL mark_missed_p = FALSE;
     BOOL bad_ref_p = FALSE;
@@ -35579,11 +34322,10 @@ void gc_heap::verify_partial ()
             seg = heap_segment_next_rw (seg);
         }
     }
-#endif //BACKGROUND_GC
 }
+#endif //BACKGROUND_GC
 
 #ifdef VERIFY_HEAP
-
 void
 gc_heap::verify_free_lists ()
 {
@@ -35667,12 +34409,15 @@ gc_heap::verify_heap (BOOL begin_gc_p)
 #endif //BACKGROUND_GC
 #endif //MULTIPLE_HEAPS
 
+#ifndef TRACE_GC
     UNREFERENCED_PARAMETER(begin_gc_p);
+#endif //!TRACE_GC
+
 #ifdef BACKGROUND_GC
     dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin",
         (begin_gc_p ? "BEG" : "END"),
         VolatileLoad(&settings.gc_index),
-        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
+        (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC"))));
 #else
     dprintf (2,("[%s]GC#%d: Verifying heap - begin",
                 (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index)));
@@ -35937,7 +34682,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
                     if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
                         deep_verify_obj = FALSE;
 
-                    ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);
+                    ((CObjectHeader*)curr_object)->ValidateHeap(deep_verify_obj);
 
                     if (can_verify_deep)
                     {
@@ -36030,7 +34775,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
 
 #ifdef BACKGROUND_GC
     dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id",
-                 (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p () ? "FGC" : "NGC")),
+                 (settings.concurrent ? "BGC" : (gc_heap::background_running_p () ? "FGC" : "NGC")),
                  (begin_gc_p ? "BEG" : "END"),
                  ((current_c_gc_state == c_gc_state_planning) ? "in plan" : "not in plan"),
                  total_objects_verified, total_objects_verified_deep));
@@ -36067,23 +34812,13 @@ gc_heap::verify_heap (BOOL begin_gc_p)
     }
 
 #ifdef BACKGROUND_GC
-    if (!settings.concurrent)
-    {
-        if (current_c_gc_state == c_gc_state_planning)
-        {
-            // temporarily commenting this out 'cause an FGC
-            // could be triggered before we sweep ephemeral.
-            //verify_seg_end_mark_array_cleared();
-        }
-    }
-
     if (settings.concurrent)
     {
         verify_mark_array_cleared();
     }
     dprintf (2,("GC%d(%s): Verifying heap - end",
         VolatileLoad(&settings.gc_index),
-        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
+        (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC"))));
 #else
     dprintf (2,("GC#%d: Verifying heap - end", VolatileLoad(&settings.gc_index)));
 #endif //BACKGROUND_GC
@@ -36116,13 +34851,7 @@ void GCHeap::ValidateObjectMember (Object* obj)
 #endif // VERIFY_HEAP
 }
 
-void DestructObject (CObjectHeader* hdr)
-{
-    UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
-    hdr->~CObjectHeader();
-}
-
-HRESULT GCHeap::StaticShutdown ()
+HRESULT GCHeap::StaticShutdown()
 {
     deleteGCShadow();
 
@@ -36240,12 +34969,6 @@ HRESULT GCHeap::Initialize()
     g_num_processors = GCToOSInterface::GetTotalProcessorCount();
     assert(g_num_processors != 0);
 
-//Initialize the static members.
-#ifdef TRACE_GC
-    GcDuration = 0;
-    CreatedObjectCount = 0;
-#endif //TRACE_GC
-
     bool is_restricted;
     gc_heap::total_physical_mem = (size_t)GCConfig::GetGCTotalPhysicalMemory();
     if (!(gc_heap::total_physical_mem))
@@ -36272,14 +34995,9 @@ HRESULT GCHeap::Initialize()
         if (is_restricted)
         {
             uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100;
-            //printf ("returned physical mem %I64d, setting it to max (75%%: %I64d, 20mb)\n",
-            //    gc_heap::total_physical_mem, physical_mem_for_gc);
             gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc);
         }
     }
-
-    //printf ("heap_hard_limit is %Id, total physical mem: %Id, %s restricted\n",
-    //    gc_heap::heap_hard_limit, gc_heap::total_physical_mem, (is_restricted ? "is" : "is not"));
 #endif //HOST_64BIT
 
     uint32_t nhp = 1;
@@ -36371,13 +35089,11 @@ HRESULT GCHeap::Initialize()
 
     gc_heap::min_uoh_segment_size = large_seg_size;
 
-#ifdef SEG_MAPPING_TABLE
     if (gc_heap::min_segment_size == 0)
     {
         gc_heap::min_segment_size = min (seg_size, large_seg_size);
     }
     gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size);
-#endif //SEG_MAPPING_TABLE
 
 #ifdef MULTIPLE_HEAPS
     gc_heap::n_heaps = nhp;
@@ -36714,7 +35430,6 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
 
     dprintf (3, ("Promote %Ix", (size_t)o));
 
-#ifdef INTERIOR_POINTERS
     if (flags & GC_CALL_INTERIOR)
     {
         if ((o < hp->gc_low) || (o >= hp->gc_high))
@@ -36727,7 +35442,6 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
         }
 
     }
-#endif //INTERIOR_POINTERS
 
 #ifdef FEATURE_CONSERVATIVE_GC
     // For conservative GC, a value on stack may point to middle of a free object.
@@ -36740,7 +35454,7 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
 #endif
 
 #ifdef _DEBUG
-    ((CObjectHeader*)o)->ValidatePromote(sc, flags);
+    ((CObjectHeader*)o)->Validate();
 #else
     UNREFERENCED_PARAMETER(sc);
 #endif //_DEBUG
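With the #ifdef INTERIOR_POINTERS pair gone, the GC_CALL_INTERIOR normalization in Promote (and in background_promote_callback above) is unconditional: out-of-range interior pointers are dropped, in-range ones are mapped back to their containing object before validation. A standalone sketch combining the two variants shown in this diff; find_object here is a trivial stand-in for the real brick-table search:

    #include <cstdint>

    const uint32_t GC_CALL_INTERIOR = 0x1;  // illustrative value, not the real flag

    // stand-in for gc_heap::find_object
    uint8_t* find_object (uint8_t* o, uint8_t* low)
    {
        return (o >= low) ? o : nullptr;  // sketch only
    }

    // returns the object to promote, or nullptr if the reference is dropped
    uint8_t* normalize_interior_sketch (uint8_t* o, uint8_t* gc_low,
                                        uint8_t* gc_high, uint32_t flags)
    {
        if (flags & GC_CALL_INTERIOR)
        {
            if ((o < gc_low) || (o >= gc_high))
                return nullptr;            // interior pointer outside the range
            o = find_object (o, gc_low);   // map back to the containing object
        }
        return o;
    }

    int main ()
    {
        static uint8_t heap[16];
        normalize_interior_sketch (heap + 4, heap, heap + sizeof (heap), GC_CALL_INTERIOR);
        return 0;
    }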
@@ -36835,14 +35549,6 @@ void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
 
 void StressHeapDummy ();
 
-static int32_t GCStressStartCount = -1;
-static int32_t GCStressCurCount = 0;
-static int32_t GCStressStartAtJit = -1;
-
-// the maximum number of foreground GCs we'll induce during one BGC
-// (this number does not include "naturally" occurring GCs).
-static int32_t GCStressMaxFGCsPerBGC = -1;
-
 // CLRRandom implementation can produce FPU exceptions if
 // the test/application run by CLR is enabling any FPU exceptions.
 // We want to avoid any unexpected exception coming from stress
@@ -36903,46 +35609,6 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
         }
 #endif //BACKGROUND_GC
 
-        if (GCStressStartAtJit == -1 || GCStressStartCount == -1)
-        {
-            GCStressStartCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStressStart);
-            GCStressStartAtJit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStartAtJit);
-        }
-
-        if (GCStressMaxFGCsPerBGC == -1)
-        {
-            GCStressMaxFGCsPerBGC = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMaxFGCsPerBGC);
-            if (g_pConfig->IsGCStressMix() && GCStressMaxFGCsPerBGC == -1)
-                GCStressMaxFGCsPerBGC = 6;
-        }
-
-#ifdef _DEBUG
-        if (g_JitCount < GCStressStartAtJit)
-            return FALSE;
-#endif //_DEBUG
-
-        // Allow programmer to skip the first N Stress GCs so that you can
-        // get to the interesting ones faster.
-        Interlocked::Increment(&GCStressCurCount);
-        if (GCStressCurCount < GCStressStartCount)
-            return FALSE;
-
-        // throttle the number of stress-induced GCs by a factor given by GCStressStep
-        if ((GCStressCurCount % g_pConfig->GetGCStressStep()) != 0)
-        {
-            return FALSE;
-        }
-
-#ifdef BACKGROUND_GC
-        if (IsConcurrentGCEnabled() && IsConcurrentGCInProgress())
-        {
-            // allow a maximum number of stress induced FGCs during one BGC
-            if (gc_stress_fgcs_in_bgc >= GCStressMaxFGCsPerBGC)
-                return FALSE;
-            ++gc_stress_fgcs_in_bgc;
-        }
-#endif // BACKGROUND_GC
-
     if (g_pStringClass == 0)
     {
         // If the String class has not been loaded, dont do any stressing. This should
@@ -37045,10 +35711,9 @@ bool GCHeap::StressHeap(gc_alloc_context * context)
 #else
     UNREFERENCED_PARAMETER(context);
     return FALSE;
-#endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
+#endif //STRESS_HEAP && !FEATURE_REDHAWK
 }
 
-
 #ifdef FEATURE_PREMORTEM_FINALIZATION
 #define REGISTER_FOR_FINALIZATION(_object, _size) \
     hp->finalize_queue->RegisterForFinalization (0, (_object), (_size))
@@ -37183,10 +35848,6 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
     }
     else
     {
-#ifdef TRACE_GC
-        AllocSmallCount++;
-#endif //TRACE_GC
-
 #ifdef FEATURE_64BIT_ALIGNMENT
         if (flags & GC_ALLOC_ALIGN8)
         {
@@ -37206,10 +35867,6 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
     }
 
     CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);
-
-#ifdef TRACE_GC
-    AllocCount++;
-#endif //TRACE_GC
     return newAlloc;
 }
 
@@ -37327,7 +35984,7 @@ GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
     dynamic_data* dd = hpt->dynamic_data_of (generation);
 
 #ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         if ((mode == collection_optimized) || (mode & collection_non_blocking))
         {
@@ -37400,7 +36057,7 @@ retry:
         (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry))
     {
 #ifdef BACKGROUND_GC
-        if (recursive_gc_sync::background_running_p())
+        if (gc_heap::background_running_p())
         {
             pGenGCHeap->background_gc_wait();
         }
@@ -37490,7 +36147,7 @@ void gc_heap::do_pre_gc()
         dd_collection_count (hp->dynamic_data_of (0)),
         settings.condemned_generation,
         total_allocated_since_last_gc,
-        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
+        (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")),
         settings.b_state));
 #else
     dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)",
@@ -37510,14 +36167,6 @@ void gc_heap::do_pre_gc()
     }
 #endif //TRACE_GC
 
-    // TODO: this can happen...it's because of the way we are calling
-    // do_pre_gc, will fix later.
-    //if (last_gc_index > VolatileLoad(&settings.gc_index))
-    //{
-    //    FATAL_GC_ERROR();
-    //}
-
-    last_gc_index = VolatileLoad(&settings.gc_index);
     GCHeap::UpdatePreGCCounters();
 #if defined(__linux__)
     GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)),
@@ -37530,9 +36179,6 @@ void gc_heap::do_pre_gc()
     {
 #ifdef BACKGROUND_GC
         full_gc_counts[gc_type_background]++;
-#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
-        GCHeap::gc_stress_fgcs_in_bgc = 0;
-#endif // STRESS_HEAP && !FEATURE_REDHAWK
 #endif // BACKGROUND_GC
     }
     else
@@ -37685,7 +36331,7 @@ void gc_heap::check_and_adjust_bgc_tuning (int gen_number, size_t physical_size,
                 current_gen_stats->last_gen_increase_flr = gen_flr;
                 dprintf (BGC_TUNING_LOG, ("BTLp[g1: %Id, g2: %Id]: gen%d size inc %s %Id->%Id, flr: %.3f",
                         gen1_index, gen2_index, gen_number,
-                        (recursive_gc_sync::background_running_p() ? "during bgc" : ""),
+                        (gc_heap::background_running_p() ? "during bgc" : ""),
                         current_gen_stats->last_bgc_physical_size, total_gen_size, gen_flr));
             }
 
@@ -37693,7 +36339,7 @@ void gc_heap::check_and_adjust_bgc_tuning (int gen_number, size_t physical_size,
             {
                 if (bgc_tuning::enable_fl_tuning)
                 {
-                    if (!((recursive_gc_sync::background_running_p() || (hp->current_bgc_state == bgc_initialized))))
+                    if (!((gc_heap::background_running_p() || (hp->current_bgc_state == bgc_initialized))))
                     {
                         assert (settings.entry_memory_load);
 
@@ -37837,10 +36483,6 @@ void gc_heap::do_post_gc()
         initGCShadow();
     }
 
-#ifdef TRACE_GC
-    AllocStart = GCToOSInterface::QueryPerformanceCounter();
-#endif //TRACE_GC
-
 #ifdef MULTIPLE_HEAPS
     gc_heap* hp = g_heaps[0];
 #else
@@ -37901,7 +36543,7 @@ void gc_heap::do_post_gc()
         dd_collection_count(hp->dynamic_data_of(0)),
         GetHighPrecisionTimeStamp(),
         settings.condemned_generation,
-        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
+        (settings.concurrent ? "BGC" : (gc_heap::background_running_p() ? "FGC" : "NGC")),
         (settings.compaction ? "C" : "S"),
         (settings.promotion ? "P" : "S"),
         settings.entry_memory_load,
@@ -38049,15 +36691,11 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
          }
     }
 
-#ifdef TRACE_GC
-    AllocDuration += GCToOSInterface::QueryPerformanceCounter() - AllocStart;
-#endif //TRACE_GC
-
-        gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
-                                       (reason == reason_lowmemory_blocking) ||
-                                       (gc_heap::latency_level == latency_level_memory_footprint);
+    gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
+                                   (reason == reason_lowmemory_blocking) ||
+                                   (gc_heap::latency_level == latency_level_memory_footprint);
 
-        gc_trigger_reason = reason;
+    gc_trigger_reason = reason;
 
 #ifdef MULTIPLE_HEAPS
     for (int i = 0; i < gc_heap::n_heaps; i++)
@@ -38090,15 +36728,6 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
 #endif //!MULTIPLE_HEAPS
     }
 
-// MAP_EVENT_MONITORS(EE_MONITOR_GARBAGE_COLLECTIONS, NotifyEvent(EE_EVENT_TYPE_GC_STARTED, 0));
-
-#ifdef TRACE_GC
-    int64_t start;
-    int64_t finish;
-    start = GCToOSInterface::QueryPerformanceCounter();
-    PromotedObjectCount = 0;
-#endif //TRACE_GC
-
     unsigned int condemned_generation_number = gen;
 
     // We want to get a stack from the user thread that triggered the GC
@@ -38132,19 +36761,6 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
     }
 #endif //MULTIPLE_HEAPS
 
-#ifdef TRACE_GC
-    finish = GCToOSInterface::QueryPerformanceCounter();
-    GcDuration += finish - start;
-    dprintf (3,
-             ("<GC# %d> Condemned: %d, Duration: %d, total: %d Alloc Avg: %d, Small Objects:%d Large Objects:%d",
-              VolatileLoad(&pGenGCHeap->settings.gc_index), condemned_generation_number,
-              finish - start, GcDuration,
-              AllocCount ? (AllocDuration / AllocCount) : 0,
-              AllocSmallCount, AllocBigCount));
-    AllocCount = 0;
-    AllocDuration = 0;
-#endif // TRACE_GC
-
 #ifdef BACKGROUND_GC
     // We are deciding whether we should fire the alloc wait end event here
     // because in begin_foreground we could be calling end_foreground
@@ -38249,8 +36865,6 @@ int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count)
 size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
 {
     size_t totsize = 0;
-    //GCTODO
-    //ASSERT(InMustComplete());
     enter_spin_lock (&pGenGCHeap->gc_lock);
 
     heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
@@ -38405,7 +37019,7 @@ int GCHeap::SetGcLatencyMode (int newLatencyMode)
     }
 
 #ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         // If we get here, it means we are doing an FGC. If the pause
         // mode was altered we will need to save it in the BGC settings.
@@ -38528,8 +37142,6 @@ void GCHeap::PublishObject (uint8_t* Obj)
 // triggering a GC of any kind.
 size_t GCHeap::ApproxFreeBytes()
 {
-    //GCTODO
-    //ASSERT(InMustComplete());
     enter_spin_lock (&pGenGCHeap->gc_lock);
 
     generation* gen = pGenGCHeap->generation_of (0);
@@ -38574,7 +37186,6 @@ size_t GCHeap::GetValidSegmentSize(bool large_seg)
     return (large_seg ? gc_heap::min_uoh_segment_size : gc_heap::soh_segment_size);
 }
 
-// Get the max gen0 heap size, making sure it conforms.
 size_t gc_heap::get_gen0_min_size()
 {
     size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
@@ -38750,7 +37361,6 @@ bool GCHeap::RegisterForFinalization (int gen, Object* obj)
         gen = 0;
     if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
     {
-        //just reset the bit
         ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN);
         return true;
     }
@@ -38878,7 +37488,6 @@ CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
 
     if (g_fFinalizerRunOnShutDown)
     {
-        //no method table available yet,
         //put it in the finalizer queue and sort out when
         //dequeueing
         dest = FinalizerListSeg;
@@ -38939,7 +37548,6 @@ Object*
 CFinalize::GetNextFinalizableObject (BOOL only_non_critical)
 {
     Object* obj = 0;
-    //serialize
     EnterFinalizeLock();
 
 retry:
@@ -39083,7 +37691,6 @@ void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
     Object** stopIndex  = SegQueueLimit (FinalizerListSeg);
     for (Object** po = startIndex; po < stopIndex; po++)
     {
-        //report *po
         fn(po < stopCriticalIndex, *po);
     }
 }
@@ -39127,7 +37734,6 @@ CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
                     {
                         //remove the object because we don't want to
                         //run the finalizer
-
                         MoveItem (i, Seg, FreeList);
 
                         //Reset the bit so it will be put back on the queue
@@ -39152,7 +37758,7 @@ CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
 #ifdef BACKGROUND_GC
                 else
                 {
-                    if ((gen == max_generation) && (recursive_gc_sync::background_running_p()))
+                    if ((gen == max_generation) && (gc_heap::background_running_p()))
                     {
                         // TODO - fix the following line.
                         //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE));
@@ -39231,8 +37837,7 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
     }
     else
     {
-        //Look for demoted or promoted plugs
-
+        //Look for demoted or promoted objects
         for (int i = gen; i >= 0; i--)
         {
             unsigned int Seg = gen_segment (i);
@@ -39257,7 +37862,6 @@ CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
                         po--;
                     }
                 }
-
             }
         }
     }
@@ -39272,9 +37876,6 @@ CFinalize::GrowArray()
     Object** newArray = new (nothrow) Object*[newArraySize];
     if (!newArray)
     {
-        // It's not safe to throw here, because of the FinalizeLock.  Tell our caller
-        // to throw for us.
-//        ASSERT (newArray);
         return FALSE;
     }
     memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));
@@ -39469,33 +38070,12 @@ void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
 
 void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
 {
-    UNREFERENCED_PARAMETER(gen_number);
-    GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
+    GCScan::GcScanHandlesForProfilerAndETW (gen_number, context, fn);
 }
 
 void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
 {
-    UNREFERENCED_PARAMETER(gen_number);
-    GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
-}
-
-// Go through and touch (read) each page straddled by a memory block.
-void TouchPages(void * pStart, size_t cb)
-{
-    const size_t pagesize = OS_PAGE_SIZE;
-    _ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
-    if (cb)
-    {
-        VOLATILE(char)* pEnd = (VOLATILE(char)*)(cb + (char*)pStart);
-        VOLATILE(char)* p = (VOLATILE(char)*)(((char*)pStart) -  (((size_t)pStart) & (pagesize-1)));
-        while (p < pEnd)
-        {
-            char a;
-            a = VolatileLoad(p);
-            //printf("Touching page %lxh\n", (uint32_t)p);
-            p += pagesize;
-        }
-    }
+    GCScan::GcScanDependentHandlesForProfilerAndETW (gen_number, context, fn);
 }
 
 #if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
@@ -39699,7 +38279,6 @@ void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_
 #else
     uint8_t *o = heap_segment_mem(seg);
 
-    // small heap alignment constant
     int alignment = get_alignment_constant(TRUE);
 
     while (o < heap_segment_allocated(seg))
@@ -39726,7 +38305,7 @@ void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_
 HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
 {
 #ifdef BACKGROUND_GC
-    if (recursive_gc_sync::background_running_p())
+    if (gc_heap::background_running_p())
     {
         uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout);
         if (dwRet == WAIT_OBJECT_0)
index 4d1aa42..75893bb 100644 (file)
@@ -73,8 +73,6 @@ public:
         "When set we put the segments that should be deleted on a standby list (instead of "     \
         "releasing them back to the OS) which will be considered to satisfy new segment requests"\
         " (note that the same thing can be specified via API which is the supported way)")        \
-    BOOL_CONFIG(StressMix,    "GCStressMix",  false,                                             \
-        "Specifies whether the GC mix mode is enabled or not")                                   \
     BOOL_CONFIG(BreakOnOOM,   "GCBreakOnOOM", false,                                             \
         "Does a DebugBreak at the soonest time we detect an OOM")                                \
     BOOL_CONFIG(NoAffinitize, "GCNoAffinitize", false,                                           \
@@ -126,8 +124,6 @@ public:
     STRING_CONFIG(LogFile,    "GCLogFile",    "Specifies the name of the GC log file")           \
     STRING_CONFIG(ConfigLogFile, "GCConfigLogFile",                                              \
         "Specifies the name of the GC config log file")                                          \
-    STRING_CONFIG(MixLogFile, "GCMixLog",                                                        \
-        "Specifies the name of the log file for GC mix statistics")                              \
     INT_CONFIG(BGCFLTuningEnabled, "BGCFLTuningEnabled", 0, "Enables FL tuning")                 \
     INT_CONFIG(BGCMemGoal, "BGCMemGoal", 75, "Specifies the physical memory load goal")          \
     INT_CONFIG(BGCMemGoalSlack, "BGCMemGoalSlack", 10,                                           \
index 05feeb8..8b7765d 100644 (file)
@@ -21,9 +21,6 @@
 #endif // __GNUC__
 
 #include "gc.h"
-
-//#define DT_LOG
-
 #include "gcrecord.h"
 
 #ifdef _MSC_VER
@@ -51,8 +48,6 @@ inline void FATAL_GC_ERROR()
 #pragma inline_depth(20)
 #endif
 
-/* the following section defines the optional features */
-
 // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
 // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION
 // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much
@@ -102,31 +97,15 @@ inline void FATAL_GC_ERROR()
 
 //#define MULTIPLE_HEAPS         //Allow multiple heaps for servers
 
-#define INTERIOR_POINTERS   //Allow interior pointers in the code manager
-
 #define CARD_BUNDLE         //enable card bundle feature.(requires WRITE_WATCH)
 
 // #define ALLOW_REFERENCES_IN_POH  //Allow POH objects to contain references.
 
-// If this is defined we use a map for segments in order to find the heap for
-// a segment fast. But it does use more memory as we have to cover the whole
-// heap range and for each entry we allocate a struct of 5 ptr-size words
-// (3 for WKS as there's only one heap).
-#define SEG_MAPPING_TABLE
-
-// If allocating the heap mapping table for the available VA consumes too
-// much memory, you can enable this to allocate only the portion that
-// corresponds to rw segments and grow it when needed in grow_brick_card_table.
-// However in heap_of you will need to always compare the address with
-// g_lowest/highest before you can look at the heap mapping table.
-#define GROWABLE_SEG_MAPPING_TABLE
-
 #ifdef BACKGROUND_GC
-#define MARK_ARRAY      //Mark bit in an array
 #define BGC_SERVO_TUNING
 #endif //BACKGROUND_GC
 
-#if defined(BACKGROUND_GC) || defined (CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
+#if defined(BACKGROUND_GC) || defined(CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP)
 #define WRITE_WATCH     //Write Watch feature
 #endif //BACKGROUND_GC || CARD_BUNDLE
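SEG_MAPPING_TABLE and GROWABLE_SEG_MAPPING_TABLE disappear here because the feature is now always on, as the gc.cpp hunks dropping their #ifdef pairs show. The caveat from the deleted comment still holds: since the table only covers the current heap range, heap_of-style lookups must bounds-check against the global lowest/highest addresses before indexing. A self-contained sketch of that guarded lookup; all names are local to the example:

    #include <cstddef>
    #include <cstdint>

    struct heap_segment_stub { int heap_number; };

    static uint8_t* g_lowest_address;
    static uint8_t* g_highest_address;
    static heap_segment_stub** seg_mapping_table;
    static size_t min_segment_size_shr;  // log2 of the minimum segment size

    heap_segment_stub* segment_of_sketch (uint8_t* addr)
    {
        // the table only covers [g_lowest_address, g_highest_address),
        // so compare first, index second
        if ((addr < g_lowest_address) || (addr >= g_highest_address))
            return nullptr;

        size_t index = (size_t)(addr - g_lowest_address) >> min_segment_size_shr;
        return seg_mapping_table[index];
    }

    int main ()
    {
        static uint8_t arena[1 << 16];
        static heap_segment_stub seg = { 0 };
        static heap_segment_stub* table[1] = { &seg };
        g_lowest_address = arena;
        g_highest_address = arena + sizeof (arena);
        seg_mapping_table = table;
        min_segment_size_shr = 16;  // one 64KB "segment" for the sketch
        return segment_of_sketch (arena + 100) ? 0 : 1;
    }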
 
@@ -140,11 +119,10 @@ inline void FATAL_GC_ERROR()
 #define MAX_LONGPATH 1024
 #endif // MAX_LONGPATH
 
-//#define TRACE_GC          //debug trace gc operation
+//#define TRACE_GC
 //#define SIMPLE_DPRINTF
 
 //#define JOIN_STATS         //amount of time spent in the join
-//also, see TIME_SUSPEND in switches.h.
 
 //#define SYNCHRONIZATION_STATS
 //#define SEG_REUSE_STATS
@@ -159,24 +137,16 @@ inline void FATAL_GC_ERROR()
     x##_end = GCToOSInterface::QueryPerformanceCounter(); \
     x += x##_end - x##_start
 
-#else
+#else //SYNCHRONIZATION_STATS
 #define BEGIN_TIMING(x)
 #define END_TIMING(x)
-#define BEGIN_TIMING_CYCLES(x)
-#define END_TIMING_CYCLES(x)
 #endif //SYNCHRONIZATION_STATS
 
-/* End of optional features */
-
 #ifdef GC_CONFIG_DRIVEN
 void GCLogConfig (const char *fmt, ... );
 #define cprintf(x) {GCLogConfig x;}
 #endif //GC_CONFIG_DRIVEN
 
-#ifdef _DEBUG
-#define TRACE_GC
-#endif
-
 // For the bestfit algorithm when we relocate ephemeral generations into an
 // existing gen2 segment.
 // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total.
@@ -205,8 +175,6 @@ void GCLogConfig (const char *fmt, ... );
 #define MAX_NUM_FREE_SPACES 200
 #define MIN_NUM_FREE_SPACES 5
 
-//Please leave these definitions intact.
-// hosted api
 #ifdef memcpy
 #undef memcpy
 #endif //memcpy
@@ -245,15 +213,16 @@ const int policy_compact = 1;
 const int policy_expand  = 2;
 
 #ifdef TRACE_GC
-#define SEG_REUSE_LOG_0 7
-#define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1)
-#define DT_LOG_0 (SEG_REUSE_LOG_1 + 1)
-#define BGC_TUNING_LOG (DT_LOG_0 + 1)
-#define GTC_LOG (DT_LOG_0 + 2)
-#define GC_TABLE_LOG (DT_LOG_0 + 3)
-#define JOIN_LOG (DT_LOG_0 + 4)
-#define SPINLOCK_LOG (DT_LOG_0 + 5)
-#define SNOOP_LOG (DT_LOG_0 + 6)
+#define MIN_CUSTOM_LOG_LEVEL 7
+#define SEG_REUSE_LOG_0 (MIN_CUSTOM_LOG_LEVEL)
+#define SEG_REUSE_LOG_1 (MIN_CUSTOM_LOG_LEVEL + 1)
+#define DT_LOG_0 (MIN_CUSTOM_LOG_LEVEL + 2)
+#define BGC_TUNING_LOG (MIN_CUSTOM_LOG_LEVEL + 3)
+#define GTC_LOG (MIN_CUSTOM_LOG_LEVEL + 4)
+#define GC_TABLE_LOG (MIN_CUSTOM_LOG_LEVEL + 5)
+#define JOIN_LOG (MIN_CUSTOM_LOG_LEVEL + 6)
+#define SPINLOCK_LOG (MIN_CUSTOM_LOG_LEVEL + 7)
+#define SNOOP_LOG (MIN_CUSTOM_LOG_LEVEL + 8)
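
For illustration, the values implied by the defines above match the old chained scheme exactly; the assert is a sketch one could add, not part of the patch:

    // MIN_CUSTOM_LOG_LEVEL == 7 yields SEG_REUSE_LOG_0 == 7, SEG_REUSE_LOG_1 == 8,
    // DT_LOG_0 == 9, BGC_TUNING_LOG == 10, GTC_LOG == 11, GC_TABLE_LOG == 12,
    // JOIN_LOG == 13, SPINLOCK_LOG == 14, SNOOP_LOG == 15 - the same values as
    // before, now derived from a single base instead of chaining off DT_LOG_0.
    static_assert (SNOOP_LOG == MIN_CUSTOM_LOG_LEVEL + 8, "custom log levels are contiguous");
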
 
 // NOTE! This is for HEAP_BALANCE_INSTRUMENTATION
 // This particular one is special and needs to be well formatted because we
@@ -268,18 +237,8 @@ const int policy_expand  = 2;
 
 #ifdef SIMPLE_DPRINTF
 
-//#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
 void GCLog (const char *fmt, ... );
-//#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
-//#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
-//#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
-//#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
-//#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}}
 #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
-//#define dprintf(l,x) {if (l == HEAP_BALANCE_LOG) {GCLog x;}}
-//#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
-//#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
-//#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
 #else //SIMPLE_DPRINTF
 // Nobody used the logging mechanism that used to be here. If we find ourselves
 // wanting to inspect GC logs on unmodified builds, we can use this define here
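
A hypothetical call site showing what the one remaining SIMPLE_DPRINTF definition keeps (identifiers in the calls are illustrative): only level <= 1 and GTC_LOG messages reach GCLog.

    dprintf (GTC_LOG, ("h%d: gen%d budget set to %Id", heap_number, n, new_allocation));
    dprintf (3, ("a level-3 trace: the test fails, so GCLog is never invoked"));
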
@@ -346,9 +305,7 @@ typedef GCDebugSpinLock GCSpinLock;
 class mark;
 class heap_segment;
 class CObjectHeader;
-class l_heap;
 class sorted_table;
-class c_synchronize;
 class seg_free_spaces;
 class gc_heap;
 
@@ -605,57 +562,6 @@ public:
     }
 };
 
-#ifdef GC_STATS
-
-// GC specific statistics, tracking counts and timings for GCs occuring in the system.
-// This writes the statistics to a file every 60 seconds, if a file is specified in
-// COMPlus_GcMixLog
-
-struct GCStatistics
-    : public StatisticsBase
-{
-    // initialized to the contents of COMPlus_GcMixLog, or NULL, if not present
-    static char* logFileName;
-    static FILE*  logFile;
-
-    // number of times we executed a background GC, a foreground GC, or a
-    // non-concurrent GC
-    int cntBGC, cntFGC, cntNGC;
-
-    // min, max, and total time spent performing BGCs, FGCs, NGCs
-    // (BGC time includes everything between the moment the BGC starts until
-    // it completes, i.e. the times of all FGCs occuring concurrently)
-    MinMaxTot bgc, fgc, ngc;
-
-    // number of times we executed a compacting GC (sweeping counts can be derived)
-    int cntCompactNGC, cntCompactFGC;
-
-    // count of reasons
-    int cntReasons[reason_max];
-
-    // count of condemned generation, by NGC and FGC:
-    int cntNGCGen[max_generation];
-    int cntFGCGen[max_generation];
-
-    ///////////////////////////////////////////////////////////////////////////////////////////////
-    // Internal mechanism:
-
-    virtual void Initialize();
-    virtual void DisplayAndUpdate();
-
-    // Public API
-
-    static BOOL Enabled()
-    { return logFileName != NULL; }
-
-    void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec);
-};
-
-extern GCStatistics g_GCStatistics;
-extern GCStatistics g_LastGCStatistics;
-
-#endif // GC_STATS
-
 typedef DPTR(class heap_segment)               PTR_heap_segment;
 typedef DPTR(class gc_heap)                    PTR_gc_heap;
 typedef DPTR(PTR_gc_heap)                      PTR_PTR_gc_heap;
@@ -896,7 +802,6 @@ public:
 
 #define ro_in_entry 0x1
 
-#ifdef SEG_MAPPING_TABLE
 // Note that I am storing both h0 and seg0, even though in Server GC you can get to
 // the heap* from the segment info. This is because heap_of needs to be really fast
 // and we would not want yet another indirection.
@@ -920,7 +825,6 @@ struct seg_mapping
     // with 0x1 to indicate that there is a ro segment for
     // this entry.
 };
-#endif //SEG_MAPPING_TABLE
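
A minimal sketch of the lookup this table serves; the comment above only confirms h0/seg0, so the boundary/h1 fields and the index math here are assumptions for illustration:

    inline gc_heap* heap_of_sketch (uint8_t* o)
    {
        // one entry per min-segment-sized stripe of the address range
        seg_mapping* entry = &seg_mapping_table[(size_t)o >> gc_heap::min_segment_size_shr];
        // addresses past the entry's boundary belong to the second heap it records
        return ((o > entry->boundary) ? entry->h1 : entry->h0);
    }
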
 
 // alignment helpers
 //Alignment constant for allocation
@@ -1213,14 +1117,6 @@ public:
 
     PER_HEAP
     void verify_soh_segment_list();
-    PER_HEAP
-    void verify_mark_array_cleared (heap_segment* seg);
-    PER_HEAP
-    void verify_mark_array_cleared();
-    PER_HEAP
-    void verify_seg_end_mark_array_cleared();
-    PER_HEAP
-    void verify_partial();
 
 #ifdef VERIFY_HEAP
     PER_HEAP
@@ -1243,8 +1139,6 @@ public:
     heap_segment* make_heap_segment (uint8_t* new_pages,
                                      size_t size,
                                      int h_number);
-    static
-    l_heap* make_large_heap (uint8_t* new_pages, size_t size, BOOL managed);
 
     static
     gc_heap* make_gc_heap(
@@ -1536,9 +1430,7 @@ protected:
     size_t get_full_compact_gc_count();
 
     PER_HEAP
-    BOOL short_on_end_of_seg (int gen_number,
-                              heap_segment* seg,
-                              int align_const);
+    BOOL short_on_end_of_seg (heap_segment* seg, int align_const);
 
     PER_HEAP
     BOOL a_fit_free_list_p (int gen_number,
@@ -1626,7 +1518,6 @@ protected:
     PER_HEAP
     BOOL uoh_get_new_seg (int gen_number,
                           size_t size,
-                          int align_const,
                           BOOL* commit_failed_p,
                           oom_reason* oom_r);
 
@@ -1709,12 +1600,12 @@ protected:
     PER_HEAP
     void fix_allocation_contexts (BOOL for_gc_p);
     PER_HEAP
-    void fix_youngest_allocation_area (BOOL for_gc_p);
+    void fix_youngest_allocation_area();
     PER_HEAP
     void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
                                  int align_const);
     PER_HEAP
-    void fix_uoh_allocation_area (BOOL for_gc_p);
+    void fix_uoh_allocation_area();
     PER_HEAP
     void fix_older_allocation_area (generation* older_gen);
     PER_HEAP
@@ -1738,8 +1629,6 @@ protected:
     PER_HEAP
     BOOL set_ro_segment_in_range (heap_segment* seg);
     PER_HEAP
-    BOOL unprotect_segment (heap_segment* seg);
-    PER_HEAP
     heap_segment* soh_get_segment_to_expand();
     PER_HEAP
     heap_segment* get_segment (size_t size, BOOL loh_p);
@@ -1796,8 +1685,7 @@ protected:
     PER_HEAP
     void adjust_ephemeral_limits();
     PER_HEAP
-    void make_generation (int gen_num, heap_segment* seg, uint8_t* start, uint8_t* pointer);
-
+    void make_generation (int gen_num, heap_segment* seg, uint8_t* start);
 
 #define USE_PADDING_FRONT 1
 #define USE_PADDING_TAIL  2
@@ -1810,7 +1698,7 @@ protected:
                        int align_const);
 
     PER_HEAP
-    void handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
+    void handle_oom (oom_reason reason, size_t alloc_size,
                      uint8_t* allocated, uint8_t* reserved);
 
     PER_HEAP
@@ -1868,7 +1756,6 @@ protected:
     PER_HEAP
     void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
                                 short* old_brick_table,
-                                heap_segment* seg,
                                 uint8_t* start, uint8_t* end);
     PER_HEAP
     void init_brick_card_range (heap_segment* seg);
@@ -1882,7 +1769,7 @@ protected:
     void set_brick (size_t index, ptrdiff_t val);
     PER_HEAP
     int get_brick_entry (size_t index);
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     PER_HEAP
     unsigned int mark_array_marked (uint8_t* add);
     PER_HEAP
@@ -1903,15 +1790,12 @@ protected:
         , BOOL read_only=FALSE
 #endif // FEATURE_BASICFREEZE
         );
-#ifdef BACKGROUND_GC
     PER_HEAP
     void seg_clear_mark_array_bits_soh (heap_segment* seg);
     PER_HEAP
     void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
     PER_HEAP
     void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end);
-    PER_HEAP
-    void clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p);
 #ifdef VERIFY_HEAP
     PER_HEAP
     void set_batch_mark_array_bits (uint8_t* start, uint8_t* end);
@@ -1919,7 +1803,6 @@ protected:
     void check_batch_mark_array_bits (uint8_t* start, uint8_t* end);
 #endif //VERIFY_HEAP
 #endif //BACKGROUND_GC
-#endif //MARK_ARRAY
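
For orientation, a sketch of the bit test mark_array backs, assuming the usual word/bit helpers (mark_word_of and mark_bit_bit_of are assumed names, not quoted from this patch):

    // inside gc_heap: one mark bit per address granule; a set bit means the
    // background GC marked the object at 'add'
    unsigned int mark_array_marked_sketch (uint8_t* add)
    {
        return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)));
    }
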
 
     PER_HEAP
     BOOL uoh_object_marked (uint8_t* o, BOOL clearp);
@@ -1936,8 +1819,7 @@ protected:
     void check_for_full_gc (int gen_num, size_t size);
 
     PER_HEAP
-    void adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
-                       int gen_number);
+    void adjust_limit (uint8_t* start, size_t limit_size, generation* gen);
     PER_HEAP
     void adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
                            alloc_context* acontext, uint32_t flags, heap_segment* seg,
@@ -1981,19 +1863,11 @@ protected:
 #endif //SHORT_PLUGS
                                              uint8_t* old_loc=0
                                              REQD_ALIGN_AND_OFFSET_DEFAULT_DCL);
-#ifdef INTERIOR_POINTERS
     // Verifies that interior is actually in the range of seg; otherwise
     // returns 0.
     PER_HEAP_ISOLATED
     heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p);
 
-    PER_HEAP
-    heap_segment* find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p);
-
-    PER_HEAP
-    uint8_t* find_object_for_relocation (uint8_t* o, uint8_t* low, uint8_t* high);
-#endif //INTERIOR_POINTERS
-
     PER_HEAP_ISOLATED
     gc_heap* heap_of (uint8_t* object);
 
@@ -2029,8 +1903,6 @@ protected:
     PER_HEAP
     void set_allocator_next_pin (generation* gen);
     PER_HEAP
-    void set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit);
-    PER_HEAP
     void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len);
     PER_HEAP
     void enque_pinned_plug (uint8_t* plug,
@@ -2039,11 +1911,6 @@ protected:
     PER_HEAP
     void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size);
     PER_HEAP
-    void set_pinned_info (uint8_t* last_pinned_plug,
-                          size_t plug_len,
-                          uint8_t* alloc_pointer,
-                          uint8_t*& alloc_limit);
-    PER_HEAP
     void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen);
     PER_HEAP
     void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug);
@@ -2080,7 +1947,7 @@ protected:
     PER_HEAP
     BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high);
     PER_HEAP
-    uint8_t* mark_object(uint8_t* o THREAD_NUMBER_DCL);
+    void mark_object (uint8_t* o THREAD_NUMBER_DCL);
 #ifdef HEAP_ANALYZE
     PER_HEAP
     void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL);
@@ -2096,7 +1963,6 @@ protected:
 #endif //MH_SC_MARK
 
 #ifdef BACKGROUND_GC
-
     PER_HEAP
     BOOL background_marked (uint8_t* o);
     PER_HEAP
@@ -2174,6 +2040,10 @@ protected:
     PER_HEAP
     void bgc_verify_mark_array_cleared (heap_segment* seg);
     PER_HEAP
+    void verify_mark_array_cleared();
+    PER_HEAP
+    void verify_partial();
+    PER_HEAP
     void verify_mark_bits_cleared (uint8_t* obj, size_t s);
     PER_HEAP
     void clear_all_mark_array();
@@ -2565,7 +2435,7 @@ protected:
     BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit);
 
     PER_HEAP
-    uint8_t* loh_allocate_in_condemned (uint8_t* old_loc, size_t size);
+    uint8_t* loh_allocate_in_condemned (size_t size);
 
     PER_HEAP_ISOLATED
     BOOL loh_object_p (uint8_t* o);
@@ -2744,7 +2614,7 @@ protected:
     PER_HEAP
     uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object);
     PER_HEAP
-    uint8_t* compute_next_boundary (uint8_t* low, int gen_number, BOOL relocating);
+    uint8_t* compute_next_boundary (int gen_number, BOOL relocating);
     PER_HEAP
     void keep_card_live (uint8_t* o, size_t& n_gen,
                          size_t& cg_pointers_found);
@@ -2975,8 +2845,6 @@ protected:
     void mark_through_cards_for_uoh_objects(card_fn fn, int oldest_gen_num, BOOL relocating
                                               CARD_MARKING_STEALING_ARG(gc_heap* hpt));
     PER_HEAP
-    void descr_segment (heap_segment* seg);
-    PER_HEAP
     void descr_generations (BOOL begin_gc_p);
 
     PER_HEAP_ISOLATED
@@ -3007,23 +2875,14 @@ protected:
 #endif
 #endif //MULTIPLE_HEAPS
 
-    /*------------ End of Multiple non isolated heaps ---------*/
-
-#ifndef SEG_MAPPING_TABLE
-    PER_HEAP_ISOLATED
-    heap_segment* segment_of (uint8_t* add,  ptrdiff_t & delta,
-                              BOOL verify_p = FALSE);
-#endif //SEG_MAPPING_TABLE
-
 #ifdef BACKGROUND_GC
 
-    //this is called by revisit....
     PER_HEAP
     uint8_t* high_page (heap_segment* seg, BOOL concurrent_p);
 
     PER_HEAP
-    void revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p,
-                               heap_segment* seg,  uint8_t*& last_page,
+    void revisit_written_page (uint8_t* page, uint8_t* end,
+                               BOOL concurrent_p, uint8_t*& last_page,
                                uint8_t*& last_object, BOOL large_objects_p,
                                size_t& num_marked_objects);
     PER_HEAP
@@ -3042,9 +2901,6 @@ protected:
     void restart_EE ();
 
     PER_HEAP
-    void background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags);
-
-    PER_HEAP
     void background_scan_dependent_handles (ScanContext *sc);
 
     PER_HEAP
@@ -3064,9 +2920,6 @@ protected:
     void clear_commit_flag_global();
 
     PER_HEAP_ISOLATED
-    void verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr);
-
-    PER_HEAP_ISOLATED
     void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr);
 
     PER_HEAP_ISOLATED
@@ -3095,7 +2948,7 @@ protected:
     // committed. All rw segments are in range, only ro segments
     // can be partial in range.
     PER_HEAP
-    BOOL commit_mark_array_bgc_init (uint32_t* mark_array_addr);
+    BOOL commit_mark_array_bgc_init();
 
     PER_HEAP
     BOOL commit_new_mark_array (uint32_t* new_mark_array);
@@ -3149,6 +3002,8 @@ protected:
     PER_HEAP
     uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE);
     PER_HEAP_ISOLATED
+    BOOL background_running_p() { return gc_background_running; }
+    PER_HEAP_ISOLATED
     void start_c_gc();
     PER_HEAP
     void kill_gc_thread();
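
An illustrative caller: the new accessor (backed by the gc_background_running flag added below) stands in for the state the removed recursive_gc_sync used to expose; the surrounding logic is a sketch.

    // inside gc_heap code: a BGC is already in flight - wait for it
    // rather than triggering another collection
    if (gc_heap::background_running_p())
    {
        background_gc_wait();
    }
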
@@ -3288,20 +3143,20 @@ public:
     PER_HEAP
     short* brick_table;
 
-#ifdef MARK_ARRAY
+#ifdef BACKGROUND_GC
     PER_HEAP
     uint32_t* mark_array;
-#endif //MARK_ARRAY
+#endif //BACKGROUND_GC
 
 #ifdef CARD_BUNDLE
     PER_HEAP
     uint32_t* card_bundle_table;
 #endif //CARD_BUNDLE
 
-#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
+#ifdef FEATURE_BASICFREEZE
     PER_HEAP_ISOLATED
     sorted_table* seg_table;
-#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
+#endif //FEATURE_BASICFREEZE
 
     PER_HEAP_ISOLATED
     VOLATILE(BOOL) gc_started;
@@ -3526,21 +3381,16 @@ public:
     PER_HEAP_ISOLATED
     bool use_large_pages_p;
 
-    PER_HEAP_ISOLATED
-    size_t last_gc_index;
-
 #ifdef HEAP_BALANCE_INSTRUMENTATION
     PER_HEAP_ISOLATED
     size_t last_gc_end_time_ms;
 #endif //HEAP_BALANCE_INSTRUMENTATION
 
-#ifdef SEG_MAPPING_TABLE
     PER_HEAP_ISOLATED
     size_t min_segment_size;
 
     PER_HEAP_ISOLATED
     size_t min_segment_size_shr;
-#endif //SEG_MAPPING_TABLE
 
     // For SOH we always allocate segments of the same
     // size unless no_gc_region requires larger ones.
@@ -3665,6 +3515,9 @@ protected:
     PER_HEAP_ISOLATED
     gc_history_global bgc_data_global;
 
+    PER_HEAP_ISOLATED
+    VOLATILE(BOOL) gc_background_running;
+
     PER_HEAP
     gc_history_per_heap bgc_data_per_heap;
 
@@ -4190,7 +4043,7 @@ protected:
 
 #ifdef BACKGROUND_GC
     PER_HEAP
-    heap_segment* freeable_small_heap_segment;
+    heap_segment* freeable_soh_segment;
 #endif //BACKGROUND_GC
 
     PER_HEAP
index a4ca132..cc2c555 100644 (src/coreclr/src/gc/handletablescan.cpp)
@@ -895,7 +895,7 @@ static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj)
     MethodTable* pMT = (MethodTable*)(obj->GetGCSafeMethodTable());
     pMT->SanityCheck();
 #else
-    obj->ValidateHeap(from);
+    obj->ValidateHeap();
 #endif // FEATURE_REDHAWK
 }
 
index fb643f4..df0d44a 100644 (src/coreclr/src/inc/clrconfigvalues.h)
@@ -274,12 +274,8 @@ CONFIG_STRING_INFO(INTERNAL_SkipGCCoverage, W("SkipGcCoverage"), "Specify a list
 RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_gcForceCompact, W("gcForceCompact"), "When set to true, always do compacting GC")
 RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCgen0size, W("GCgen0size"), "Specifies the smallest gen0 size")
 RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCGen0MaxBudget, W("GCGen0MaxBudget"), "Specifies the largest gen0 allocation budget")
-RETAIL_CONFIG_DWORD_INFO(INTERNAL_GCStressMix, W("GCStressMix"), 0, "Specifies whether the GC mix mode is enabled or not")
-RETAIL_CONFIG_DWORD_INFO(INTERNAL_GCStressStep, W("GCStressStep"), 1, "Specifies how often StressHeap will actually do a GC in GCStressMix mode")
-RETAIL_CONFIG_DWORD_INFO(INTERNAL_GCStressMaxFGCsPerBGC, W("GCStressMaxFGCsPerBGC"), ~0U, "Specifies how many FGCs will occur during one BGC in GCStressMix mode")
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_StatsUpdatePeriod, W("StatsUpdatePeriod"), 60, "Specifies the interval, in seconds, at which to update the statistics")
 RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_SuspendTimeLog, W("SuspendTimeLog"), "Specifies the name of the log file for suspension statistics")
-RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_GCMixLog, W("GCMixLog"), "Specifies the name of the log file for GC mix statistics")
 CONFIG_DWORD_INFO_DIRECT_ACCESS(INTERNAL_GCLatencyMode, W("GCLatencyMode"), "Specifies the GC latency mode - batch, interactive or low latency (note that the same thing can be specified via API which is the supported way)")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_GCLatencyLevel, W("GCLatencyLevel"), 1, "Specifies the GC latency level that you want to optimize for")
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCConfigLogEnabled, W("GCConfigLogEnabled"), 0, "Specifies if you want to turn on config logging in GC")
@@ -296,8 +292,6 @@ RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCLOHCompact, W("GCLOHCompact
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_gcAllowVeryLargeObjects, W("gcAllowVeryLargeObjects"), 1, "Allow allocation of 2GB+ objects on GC heap")
 RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_GCStress, W("GCStress"), 0, "Trigger GCs at regular intervals", CLRConfig::REGUTIL_default)
 CONFIG_DWORD_INFO_EX(INTERNAL_GcStressOnDirectCalls, W("GcStressOnDirectCalls"), 0, "Whether to trigger a GC on direct calls", CLRConfig::REGUTIL_default)
-RETAIL_CONFIG_DWORD_INFO(EXTERNAL_GCStressStart, W("GCStressStart"), 0, "Start GCStress after N stress GCs have been attempted")
-RETAIL_CONFIG_DWORD_INFO(INTERNAL_GCStressStartAtJit, W("GCStressStartAtJit"), 0, "Start GCStress after N items are jitted")
 RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_gcTrimCommitOnLowMemory, W("gcTrimCommitOnLowMemory"), "When set we trim the committed space more aggressively for the ephemeral seg. This is used for running many instances of server processes where they want to keep as little memory committed as possible")
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_BGCSpinCount, W("BGCSpinCount"), 140, "Specifies the bgc spin count")
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_BGCSpin, W("BGCSpin"), 2, "Specifies the bgc spin time")
index 7147136..1053058 100644 (src/coreclr/src/vm/eeconfig.cpp)
@@ -103,11 +103,6 @@ HRESULT EEConfig::Init()
     iGCStress     = 0;
 #endif
 
-#ifdef STRESS_HEAP
-    iGCStressMix  = 0;
-    iGCStressStep = 1;
-#endif
-
     fGCBreakOnOOM = false;
     iGCgen0size = 0;
     iGCSegmentSize = 0;
@@ -507,17 +502,6 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_
 
 #ifdef STRESS_HEAP
     BOOL bGCStressAndHeapVerifyAllowed = true;
-    iGCStressMix        =  CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMix);
-    iGCStressStep       =  CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStep);
-
-    // For GC stress mix mode ensure reasonable defaults
-    if (iGCStressMix != 0)
-    {
-        if (iGCStress == 0)
-            iGCStress |= int(GCSTRESS_ALLOC) | int(GCSTRESS_TRANSITION);
-        if (iGCStressStep == 0 || iGCStressStep == 1)
-            iGCStressStep = 0x10;
-    }
 
     if (iGCStress)
     {
@@ -558,21 +542,14 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_
             else
             {
                 // If GCStress was enabled, and
-                // If GcConcurrent was NOT explicitly specified in the environment, and
-                // If GSCtressMix was NOT specified
-                // Then let's turn off concurrent GC since it make objects move less
-                if (iGCStressMix == 0)
-                {
-                    iGCconcurrent   =
-                    g_IGCconcurrent = 0;
-                }
+                // If GcConcurrent was NOT explicitly specified in the environment,
+                // then let's turn off concurrent GC since it makes objects move less
+                iGCconcurrent = g_IGCconcurrent = 0;
             }
         }
         else
         {
             iGCStress = 0;
-            iGCStressMix  = 0;
-            iGCStressStep = 1;
         }
     }
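
A condensed sketch of the policy that remains once the GCStressMix checks are gone (the config fields are real; the explicit-setting test is paraphrased as a hypothetical flag):

    // concurrent GC makes objects move less, so when GCStress is on and
    // gcConcurrent was not explicitly configured, force non-concurrent GC
    if (iGCStress != 0 && !gcConcurrentExplicitlySet)
    {
        iGCconcurrent = g_IGCconcurrent = 0;
    }
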
 
index cd9deac..830cd8e 100644 (src/coreclr/src/vm/eeconfig.h)
@@ -399,12 +399,6 @@ public:
     GCStressFlags GetGCStressLevel()        const { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GCStressFlags(iGCStress); }
 #endif
 
-#ifdef STRESS_HEAP
-
-    bool    IsGCStressMix  ()               const {LIMITED_METHOD_CONTRACT;  return iGCStressMix != 0;}
-    int     GetGCStressStep()               const {LIMITED_METHOD_CONTRACT;  return iGCStressStep; }
-#endif
-
     bool    IsGCBreakOnOOMEnabled()         const {LIMITED_METHOD_CONTRACT;  return fGCBreakOnOOM; }
 
     size_t  GetGCgen0size  ()               const {LIMITED_METHOD_CONTRACT;  return iGCgen0size;   }
@@ -693,11 +687,6 @@ private: //----------------------------------------------------------------
     int  iGCStress;
 #endif
 
-#ifdef STRESS_HEAP
-    int  iGCStressMix;
-    int  iGCStressStep;
-#endif
-
 #define DEFAULT_GC_PRN_LVL 3
     size_t iGCgen0size;
     size_t iGCSegmentSize;
index d2e0cb9..8369114 100644 (src/coreclr/src/vm/object.cpp)
@@ -281,19 +281,7 @@ CHARARRAYREF AllocateCharArray(DWORD dwArrayLength)
     return (CHARARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, dwArrayLength);
 }
 
-void Object::ValidatePromote(ScanContext *sc, DWORD flags)
-{
-    STATIC_CONTRACT_NOTHROW;
-    STATIC_CONTRACT_GC_NOTRIGGER;
-    STATIC_CONTRACT_FORBID_FAULT;
-
-
-#if defined (VERIFY_HEAP)
-    Validate();
-#endif
-}
-
-void Object::ValidateHeap(Object *from, BOOL bDeep)
+void Object::ValidateHeap(BOOL bDeep)
 {
     STATIC_CONTRACT_NOTHROW;
     STATIC_CONTRACT_GC_NOTRIGGER;
index 8a09151..ba51875 100644 (src/coreclr/src/vm/object.h)
@@ -254,11 +254,8 @@ class Object
     // assert.
     BOOL ValidateObjectWithPossibleAV();
 
-    // Validate an object ref out of the Promote routine in the GC
-    void ValidatePromote(ScanContext *sc, DWORD flags);
-
     // Validate an object ref out of the VerifyHeap routine in the GC
-    void ValidateHeap(Object *from, BOOL bDeep=TRUE);
+    void ValidateHeap(BOOL bDeep=TRUE);
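
Hypothetical call sites after the signature change, showing the simplification; bDeep keeps its TRUE default:

    obj->ValidateHeap();       // deep validation, as in VerifyObject above
    obj->ValidateHeap(FALSE);  // shallow validation
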
 
     PTR_SyncBlock PassiveGetSyncBlock()
     {