Low memory fixes (#76468)
author Andrew Au <andrewau@microsoft.com>
Fri, 7 Oct 2022 00:48:42 +0000 (17:48 -0700)
committer GitHub <noreply@github.com>
Fri, 7 Oct 2022 00:48:42 +0000 (17:48 -0700)
src/coreclr/gc/gc.cpp
src/coreclr/gc/gcpriv.h

index 81a8453..ee74787 100644 (file)
@@ -2331,7 +2331,12 @@ size_t      gc_heap::heap_hard_limit = 0;
 size_t      gc_heap::heap_hard_limit_oh[total_oh_count];
 
 #ifdef USE_REGIONS
+
 size_t      gc_heap::regions_range = 0;
+
+size_t      gc_heap::heap_hard_limit_for_heap = 0;
+size_t      gc_heap::heap_hard_limit_for_bookkeeping = 0;
+
 #endif //USE_REGIONS
 
 bool        affinity_config_specified_p = false;
@@ -2516,6 +2521,8 @@ int         gc_heap::num_sip_regions = 0;
 
 size_t      gc_heap::end_gen0_region_space = 0;
 
+size_t      gc_heap::end_gen0_region_committed_space = 0;
+
 size_t      gc_heap::gen0_pinned_free_space = 0;
 
 bool        gc_heap::gen0_large_chunk_found = false;
@@ -2742,7 +2749,6 @@ size_t gc_heap::compact_or_sweep_gcs[2];
 #ifdef FEATURE_LOH_COMPACTION
 BOOL                   gc_heap::loh_compaction_always_p = FALSE;
 gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default;
-int                    gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY;
 #endif //FEATURE_LOH_COMPACTION
 
 GCEvent gc_heap::full_gc_approach_event;
@@ -2855,12 +2861,20 @@ size_t     gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_co
 
 mark_queue_t gc_heap::mark_queue;
 
+#ifdef USE_REGIONS
 bool gc_heap::special_sweep_p = false;
+#endif //USE_REGIONS
+
+int gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY;
 
 #endif // MULTIPLE_HEAPS
 
 /* end of per heap static initialization */
 
+#ifdef USE_REGIONS
+const size_t uninitialized_end_gen0_region_space = (size_t)(-1);
+#endif //USE_REGIONS
+
 // budget smoothing
 size_t     gc_heap::smoothed_desired_per_heap[total_generation_count];
 /* end of static initialization */
@@ -6900,6 +6914,14 @@ bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_numb
 
 bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_number, bool* hard_limit_exceeded_p)
 {
+    /**
+     * Here are all the possible cases for the commits:
+     *
+     * Case 1: This is for a particular generation - the bucket will be one of the gc_oh_num != unknown, and the h_number will be the right heap
+     * Case 2: This is for bookkeeping - the bucket will be recorded_committed_bookkeeping_bucket, and the h_number will be -1
+     *
+     * Note  : We never commit into free directly, so bucket != recorded_committed_free_bucket
+     */
 #ifndef HOST_64BIT
     assert (heap_hard_limit == 0);
 #endif //!HOST_64BIT
@@ -6915,21 +6937,41 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
         check_commit_cs.Enter();
         bool exceeded_p = false;
 
-        if (heap_hard_limit_oh[bucket] != 0)
+        if (heap_hard_limit_oh[soh] != 0)
         {
+#ifdef USE_REGIONS
+            assert (heap_hard_limit_for_heap == 0);
+            assert (heap_hard_limit_for_bookkeeping == 0);
+#endif //USE_REGIONS
             if ((bucket < total_oh_count) && (committed_by_oh[bucket] + size) > heap_hard_limit_oh[bucket])
             {
                 exceeded_p = true;
             }
         }
-        else if ((current_total_committed + size) > heap_hard_limit)
+        else
         {
-            dprintf (1, ("%Id + %Id = %Id > limit %Id ",
-                current_total_committed, size,
-                (current_total_committed + size),
-                heap_hard_limit));
-
-            exceeded_p = true;
+            size_t base;
+            size_t limit;
+#ifdef USE_REGIONS
+            if (h_number < 0)
+            {
+                base = current_total_committed_bookkeeping;
+                limit = heap_hard_limit_for_bookkeeping;
+            }
+            else
+            {
+                base = current_total_committed - current_total_committed_bookkeeping;
+                limit = heap_hard_limit_for_heap;
+            }
+#else
+            base = current_total_committed;
+            limit = heap_hard_limit;
+#endif //USE_REGIONS
+            if ((base + size) > limit)
+            {
+                dprintf (1, ("%Id + %Id = %Id > limit %Id ", base, size, (base + size), limit));
+                exceeded_p = true;
+            }
         }
 
         if (!exceeded_p)
@@ -6979,7 +7021,10 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
                 current_total_committed, (current_total_committed - size)));
         current_total_committed -= size;
         if (h_number < 0)
+        {
+            assert (current_total_committed_bookkeeping >= size);
             current_total_committed_bookkeeping -= size;
+        }
 
         check_commit_cs.Leave();
     }
@@ -6988,6 +7033,13 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
 
 bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number)
 {
+    /**
+     * Here are all possible cases for the decommits:
+     *
+     * Case 1: This is for a particular generation - the bucket will be one of the gc_oh_num != unknown, and the h_number will be the right heap
+     * Case 2: This is for bookkeeping - the bucket will be recorded_committed_bookkeeping_bucket, and the h_number will be -1
+     * Case 3: This is for free - the bucket will be recorded_committed_free_bucket, and the h_number will be -1
+     */
 #ifndef HOST_64BIT
     assert (heap_hard_limit == 0);
 #endif //!HOST_64BIT
@@ -7011,9 +7063,13 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu
             g_heaps[h_number]->committed_by_oh_per_heap[bucket] -= size;
         }
 #endif // _DEBUG && MULTIPLE_HEAPS
+        assert (current_total_committed >= size);
         current_total_committed -= size;
-        if (h_number < 0)
+        if (bucket == recorded_committed_bookkeeping_bucket)
+        {
+            assert (current_total_committed_bookkeeping >= size);
             current_total_committed_bookkeeping -= size;
+        }
         check_commit_cs.Leave();
     }
 
@@ -8718,10 +8774,10 @@ bool gc_heap::on_used_changed (uint8_t* new_used)
             {
                 if (new_bookkeeping_covered_committed == new_used)
                 {
-                    dprintf (REGIONS_LOG, ("The minimal commit for the GC bookkeepping data structure failed, giving up"));
+                    dprintf (REGIONS_LOG, ("The minimal commit for the GC bookkeeping data structure failed, giving up"));
                     return false;
                 }
-                dprintf (REGIONS_LOG, ("The speculative commit for the GC bookkeepping data structure failed, retry for minimal commit"));
+                dprintf (REGIONS_LOG, ("The speculative commit for the GC bookkeeping data structure failed, retry for minimal commit"));
                 speculative_commit_tried = true;
             }
         }
@@ -11233,6 +11289,7 @@ void gc_heap::clear_region_info (heap_segment* region)
     if (dt_high_memory_load_p())
     {
         decommit_mark_array_by_seg (region);
+        region->flags &= ~(heap_segment_flags_ma_committed);
     }
 #endif //BACKGROUND_GC
 }
@@ -11324,7 +11381,7 @@ heap_segment* gc_heap::get_free_region (int gen_number, size_t size)
         uint8_t* region_end = heap_segment_reserved (region);
         init_heap_segment (region, __this, region_start,
                            (region_end - region_start),
-                           gen_number);
+                           gen_number, true);
 
         gc_oh_num oh = gen_to_oh (gen_number);
         dprintf(3, ("commit-accounting:  from free to %d [%Ix, %Ix) for heap %d", oh, get_region_start (region), heap_segment_committed (region), heap_number));
@@ -11348,6 +11405,9 @@ heap_segment* gc_heap::get_free_region (int gen_number, size_t size)
             heap_number, (size_t)region,
             region_start, region_end,
             gen_number));
+
+        // Something is wrong if a free region is already filled
+        assert (heap_segment_allocated(region) == heap_segment_mem (region));
     }
     else
     {
@@ -11715,11 +11775,14 @@ heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, gc_he
 
 void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp
 #ifdef USE_REGIONS
-                                 , uint8_t* start, size_t size, int gen_num
+                                 , uint8_t* start, size_t size, int gen_num, bool existing_region_p
 #endif //USE_REGIONS
     )
 {
-    seg->flags = 0;
+#ifndef USE_REGIONS
+    bool existing_region_p = false;
+#endif //!USE_REGIONS
+    seg->flags = existing_region_p ? (seg->flags & heap_segment_flags_ma_committed) : 0;
     heap_segment_next (seg) = 0;
     heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
     heap_segment_allocated (seg) = heap_segment_mem (seg);
@@ -12593,6 +12656,20 @@ void gc_heap::distribute_free_regions()
 {
 #ifdef USE_REGIONS
     const int kind_count = large_free_region + 1;
+
+#ifdef MULTIPLE_HEAPS
+    BOOL joined_last_gc_before_oom = FALSE;
+    for (int i = 0; i < n_heaps; i++)
+    {
+        if (g_heaps[i]->last_gc_before_oom)
+        {
+            joined_last_gc_before_oom = TRUE;
+            break;
+        }
+    }
+#else
+    BOOL joined_last_gc_before_oom = last_gc_before_oom;
+#endif //MULTIPLE_HEAPS
     if (settings.reason == reason_induced_aggressive)
     {
 #ifdef MULTIPLE_HEAPS
@@ -12682,7 +12759,9 @@ void gc_heap::distribute_free_regions()
             {
                 next_region = heap_segment_next (region);
                 int age_in_free_to_decommit = min (max (AGE_IN_FREE_TO_DECOMMIT, n_heaps), MAX_AGE_IN_FREE);
-                if (heap_segment_age_in_free (region) >= age_in_free_to_decommit)
+                // when we are about to get OOM, we'd like to discount the free regions that just have the initial page commit as they are not useful
+                if ((heap_segment_age_in_free (region) >= age_in_free_to_decommit) ||
+                    ((get_region_committed_size (region) == GC_PAGE_SIZE) && joined_last_gc_before_oom))
                 {
                     num_decommit_regions_by_time++;
                     size_decommit_regions_by_time += get_region_committed_size (region);
@@ -13525,6 +13604,26 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
 #endif //BACKGROUND_GC
 #endif //WRITE_WATCH
 
+#ifdef USE_REGIONS
+    if (gc_heap::heap_hard_limit && gc_heap::heap_hard_limit_oh[soh] == 0)
+    {
+        size_t gc_region_size = (size_t)1 << min_segment_size_shr;
+        size_t sizes[total_bookkeeping_elements];
+        size_t bookkeeping_size_per_region = 0;
+        uint8_t* temp_lowest_address = (uint8_t*)gc_region_size;
+        gc_heap::get_card_table_element_sizes(temp_lowest_address, temp_lowest_address + gc_region_size, sizes);
+        for (int i = 0; i < total_bookkeeping_elements; i++)
+        {
+            bookkeeping_size_per_region += sizes[i];
+        }
+        size_t total_size_per_region = gc_region_size + bookkeeping_size_per_region;
+        size_t max_region_count = gc_heap::heap_hard_limit / total_size_per_region; // implicitly rounded down
+        gc_heap::heap_hard_limit_for_heap = max_region_count * gc_region_size;
+        gc_heap::heap_hard_limit_for_bookkeeping = max_region_count * bookkeeping_size_per_region;
+        dprintf (REGIONS_LOG, ("bookkeeping_size_per_region = %Id", bookkeeping_size_per_region));
+    }
+#endif //USE_REGIONS
+
 #ifdef BACKGROUND_GC
     // leave the first page to contain only segment info
     // because otherwise we could need to revisit the first page frequently in
@@ -14234,7 +14333,10 @@ gc_heap::init_gc_heap (int h_number)
     loh_state_index = 0;
 #endif //RECORD_LOH_STATE
 
+#ifdef USE_REGIONS
     special_sweep_p = false;
+#endif //USE_REGIONS
+
 #endif //MULTIPLE_HEAPS
 
 #ifdef MULTIPLE_HEAPS
@@ -14304,6 +14406,7 @@ gc_heap::init_gc_heap (int h_number)
     num_condemned_regions = 0;
 #endif //STRESS_REGIONS
     end_gen0_region_space = 0;
+    end_gen0_region_committed_space = 0;
     gen0_pinned_free_space = 0;
     gen0_large_chunk_found = false;
     // REGIONS PERF TODO: we should really allocate the POH regions together just so that
@@ -16297,7 +16400,8 @@ BOOL gc_heap::short_on_end_of_seg (heap_segment* seg)
     uint8_t* allocated = heap_segment_allocated (seg);
 
 #ifdef USE_REGIONS
-    BOOL sufficient_p = sufficient_space_regions (end_gen0_region_space, end_space_after_gc());
+    assert (end_gen0_region_space != uninitialized_end_gen0_region_space);
+    BOOL sufficient_p = sufficient_space_regions_for_allocation (end_gen0_region_space, end_space_after_gc());
 #else
     BOOL sufficient_p = sufficient_space_end_seg (allocated,
                                                   heap_segment_committed (seg),
@@ -20201,7 +20305,8 @@ bool gc_heap::init_table_for_region (int gen_number, heap_segment* region)
         {
             dprintf (GC_TABLE_LOG, ("new seg %Ix, mark_array is %Ix",
                 heap_segment_mem (region), mark_array));
-            if (!commit_mark_array_new_seg (__this, region))
+            if (((region->flags & heap_segment_flags_ma_committed) == 0) &&
+                !commit_mark_array_new_seg (__this, region))
             {
                 dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new region %Ix-%Ix",
                     get_region_start (region), heap_segment_reserved (region)));
@@ -20211,6 +20316,10 @@ bool gc_heap::init_table_for_region (int gen_number, heap_segment* region)
                 return false;
             }
         }
+        if ((region->flags & heap_segment_flags_ma_committed) != 0)
+        {
+            bgc_verify_mark_array_cleared (region, true);
+        }
 #endif //BACKGROUND_GC
 
     if (gen_number <= max_generation)
@@ -21255,6 +21364,12 @@ void gc_heap::gc1()
             dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum));
         }
     }
+#ifdef USE_REGIONS
+    if (end_gen0_region_space == uninitialized_end_gen0_region_space)
+    {
+        end_gen0_region_space = get_gen0_end_space (memory_type_reserved);
+    }
+#endif //USE_REGIONS
 
     descr_generations ("END");
 
@@ -21607,6 +21722,9 @@ void gc_heap::gc1()
     recover_bgc_settings();
 #endif //BACKGROUND_GC
 #endif //MULTIPLE_HEAPS
+#ifdef USE_REGIONS
+    last_gc_before_oom = FALSE;
+#endif //USE_REGIONS
 }
 
 void gc_heap::save_data_for_no_gc()
@@ -22399,7 +22517,8 @@ void gc_heap::init_records()
     }
 
 #ifdef USE_REGIONS
-    end_gen0_region_space = 0;
+    end_gen0_region_space = uninitialized_end_gen0_region_space;
+    end_gen0_region_committed_space = 0;
     gen0_pinned_free_space = 0;
     gen0_large_chunk_found = false;
     num_regions_freed_in_sweep = 0;
@@ -26377,6 +26496,10 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
     static uint64_t last_mark_time = 0;
 #endif //FEATURE_EVENT_TRACE
 
+#ifdef USE_REGIONS
+    special_sweep_p = false;
+#endif //USE_REGIONS
+
 #ifdef MULTIPLE_HEAPS
     gc_t_join.join(this, gc_join_begin_mark_phase);
     if (gc_t_join.joined())
@@ -26385,7 +26508,6 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
         maxgen_size_inc_p = false;
 
 #ifdef USE_REGIONS
-        special_sweep_p = false;
         region_count = global_region_allocator.get_used_region_count();
         grow_mark_list_piece();
         verify_region_to_generation_map();
@@ -27913,8 +28035,7 @@ BOOL gc_heap::plan_loh()
         loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH;
     }
 
-    if (heap_number == 0)
-        loh_pinned_queue_decay = LOH_PIN_DECAY;
+    loh_pinned_queue_decay = LOH_PIN_DECAY;
 
     loh_pinned_queue_tos = 0;
     loh_pinned_queue_bos = 0;
@@ -28822,6 +28943,7 @@ void gc_heap::update_planned_gen0_free_space (size_t free_size, uint8_t* plug)
 // the regions again and do this update in plan phase.
 void gc_heap::get_gen0_end_plan_space()
 {
+    end_gen0_region_space = 0;
     for (int gen_idx = settings.condemned_generation; gen_idx >= 0; gen_idx--)
     {
         generation* gen = generation_of (gen_idx);
@@ -28853,7 +28975,7 @@ void gc_heap::get_gen0_end_plan_space()
     }
 }
 
-size_t gc_heap::get_gen0_end_space()
+size_t gc_heap::get_gen0_end_space(memory_type type)
 {
     size_t end_space = 0;
     heap_segment* seg = generation_start_segment (generation_of (0));
@@ -28867,12 +28989,13 @@ size_t gc_heap::get_gen0_end_space()
         //uint8_t* allocated = ((seg == ephemeral_heap_segment) ?
         //                      alloc_allocated : heap_segment_allocated (seg));
         uint8_t* allocated = heap_segment_allocated (seg);
-        end_space += heap_segment_reserved (seg) - allocated;
+        uint8_t* end = (type == memory_type_reserved) ? heap_segment_reserved (seg) : heap_segment_committed (seg);
 
+        end_space += end - allocated;
         dprintf (REGIONS_LOG, ("h%d gen0 seg %Ix, end %Ix-%Ix=%Ix, end_space->%Id",
             heap_number, heap_segment_mem (seg),
-            heap_segment_reserved (seg), allocated,
-            (heap_segment_reserved (seg) - allocated),
+            end, allocated,
+            (end - allocated),
             end_space));
 
         seg = heap_segment_next (seg);
@@ -30182,37 +30305,15 @@ void gc_heap::plan_phase (int condemned_gen_number)
         should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand);
     }
 
-#ifdef FEATURE_LOH_COMPACTION
-    loh_compacted_p = FALSE;
-#endif //FEATURE_LOH_COMPACTION
-
     if (condemned_gen_number == max_generation)
     {
 #ifdef FEATURE_LOH_COMPACTION
         if (settings.loh_compaction)
         {
-            if (plan_loh())
-            {
-                should_compact = TRUE;
-                get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
-                loh_compacted_p = TRUE;
-            }
+            should_compact = TRUE;
+            get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
         }
         else
-        {
-            if ((heap_number == 0) && (loh_pinned_queue))
-            {
-                loh_pinned_queue_decay--;
-
-                if (!loh_pinned_queue_decay)
-                {
-                    delete loh_pinned_queue;
-                    loh_pinned_queue = 0;
-                }
-            }
-        }
-
-        if (!loh_compacted_p)
 #endif //FEATURE_LOH_COMPACTION
         {
             GCToEEInterface::DiagWalkUOHSurvivors(__this, loh_generation);
@@ -30243,6 +30344,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
     gc_t_join.join(this, gc_join_decide_on_compaction);
     if (gc_t_join.joined())
     {
+#ifndef USE_REGIONS
         //safe place to delete large heap segments
         if (condemned_gen_number == max_generation)
         {
@@ -30251,7 +30353,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
                 g_heaps [i]->rearrange_uoh_segments ();
             }
         }
-
+#endif //!USE_REGIONS
         if (maxgen_size_inc_p && provisional_mode_triggered
 #ifdef BACKGROUND_GC
             && !is_bgc_in_progress()
@@ -30263,9 +30365,11 @@ void gc_heap::plan_phase (int condemned_gen_number)
         }
         else
         {
-#ifndef USE_REGIONS
+#ifdef USE_REGIONS
+            bool joined_special_sweep_p = false;
+#else
             settings.demotion = FALSE;
-#endif //!USE_REGIONS
+#endif //USE_REGIONS
             int pol_max = policy_sweep;
 #ifdef GC_CONFIG_DRIVEN
             BOOL is_compaction_mandatory = FALSE;
@@ -30276,14 +30380,16 @@ void gc_heap::plan_phase (int condemned_gen_number)
             {
                 if (pol_max < g_heaps[i]->gc_policy)
                     pol_max = policy_compact;
-#ifndef USE_REGIONS
+#ifdef USE_REGIONS
+                joined_special_sweep_p |= g_heaps[i]->special_sweep_p;
+#else
                 // set the demotion flag is any of the heap has demotion
                 if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low)
                 {
                     (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit);
                     settings.demotion = TRUE;
                 }
-#endif //!USE_REGIONS
+#endif //USE_REGIONS
 
 #ifdef GC_CONFIG_DRIVEN
                 if (!is_compaction_mandatory)
@@ -30318,7 +30424,8 @@ void gc_heap::plan_phase (int condemned_gen_number)
             for (i = 0; i < n_heaps; i++)
             {
 #ifdef USE_REGIONS
-                if (special_sweep_p)
+                g_heaps[i]->special_sweep_p = joined_special_sweep_p;
+                if (joined_special_sweep_p)
                 {
                     g_heaps[i]->gc_policy = policy_sweep;
                 }
@@ -30381,13 +30488,13 @@ void gc_heap::plan_phase (int condemned_gen_number)
     should_expand  = (gc_policy >= policy_expand);
 
 #else //MULTIPLE_HEAPS
-
+#ifndef USE_REGIONS
     //safe place to delete large heap segments
     if (condemned_gen_number == max_generation)
     {
         rearrange_uoh_segments ();
     }
-
+#endif //!USE_REGIONS
     if (maxgen_size_inc_p && provisional_mode_triggered
 #ifdef BACKGROUND_GC
         && !is_bgc_in_progress()
@@ -30437,6 +30544,41 @@ void gc_heap::plan_phase (int condemned_gen_number)
 #endif //FEATURE_EVENT_TRACE
 #endif //MULTIPLE_HEAPS
 
+#ifdef FEATURE_LOH_COMPACTION
+    loh_compacted_p = FALSE;
+#endif //FEATURE_LOH_COMPACTION
+
+    if (condemned_gen_number == max_generation)
+    {
+#ifdef FEATURE_LOH_COMPACTION
+        if (settings.loh_compaction)
+        {
+            if (should_compact && plan_loh())
+            {
+                loh_compacted_p = TRUE;
+            }
+            else
+            {
+                GCToEEInterface::DiagWalkUOHSurvivors(__this, loh_generation);
+                sweep_uoh_objects (loh_generation);
+            }
+        }
+        else
+        {
+            if (loh_pinned_queue)
+            {
+                loh_pinned_queue_decay--;
+
+                if (!loh_pinned_queue_decay)
+                {
+                    delete loh_pinned_queue;
+                    loh_pinned_queue = 0;
+                }
+            }
+        }
+#endif //FEATURE_LOH_COMPACTION
+    }
+
     if (!pm_trigger_full_gc && pm_stress_on && provisional_mode_triggered)
     {
         if ((settings.condemned_generation == (max_generation - 1)) &&
@@ -30605,10 +30747,16 @@ void gc_heap::plan_phase (int condemned_gen_number)
 #ifdef MULTIPLE_HEAPS
                 for (int i = 0; i < n_heaps; i++)
                 {
-                    g_heaps[i]->rearrange_heap_segments(TRUE);
+#ifdef USE_REGIONS
+                    g_heaps [i]->rearrange_uoh_segments();
+#endif //USE_REGIONS
+                    g_heaps [i]->rearrange_heap_segments (TRUE);
                 }
 #else //MULTIPLE_HEAPS
-                rearrange_heap_segments(TRUE);
+#ifdef USE_REGIONS
+                rearrange_uoh_segments();
+#endif //USE_REGIONS
+                rearrange_heap_segments (TRUE);
 #endif //MULTIPLE_HEAPS
 
 #ifdef MULTIPLE_HEAPS
@@ -30630,6 +30778,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
 #endif //!USE_REGIONS
 
         {
+#ifdef USE_REGIONS
+            end_gen0_region_committed_space = get_gen0_end_space (memory_type_committed);
+            dprintf(REGIONS_LOG, ("h%d computed the end_gen0_region_committed_space value to be %Id", heap_number, end_gen0_region_committed_space));
+#endif //USE_REGIONS
 #ifdef MULTIPLE_HEAPS
             dprintf(3, ("Joining after end of compaction"));
             gc_t_join.join(this, gc_join_adjust_handle_age_compact);
@@ -30892,6 +31044,11 @@ void gc_heap::plan_phase (int condemned_gen_number)
                 generation_free_obj_space (generation_of (max_generation))));
         }
 
+#ifdef USE_REGIONS
+        end_gen0_region_committed_space = get_gen0_end_space (memory_type_committed);
+        dprintf(REGIONS_LOG, ("h%d computed the end_gen0_region_committed_space value to be %Id", heap_number, end_gen0_region_committed_space));
+#endif //USE_REGIONS
+
 #ifdef MULTIPLE_HEAPS
         dprintf(3, ("Joining after end of sweep"));
         gc_t_join.join(this, gc_join_adjust_handle_age_sweep);
@@ -30906,7 +31063,9 @@ void gc_heap::plan_phase (int condemned_gen_number)
             }
 #endif //FEATURE_EVENT_TRACE
 
+#ifdef USE_REGIONS
             if (!special_sweep_p)
+#endif //USE_REGIONS
             {
                 GCScan::GcPromotionsGranted(condemned_gen_number,
                                                 max_generation, &sc);
@@ -30938,13 +31097,17 @@ void gc_heap::plan_phase (int condemned_gen_number)
         }
 
 #ifdef FEATURE_PREMORTEM_FINALIZATION
+#ifdef USE_REGIONS
         if (!special_sweep_p)
+#endif //USE_REGIONS
         {
             finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
         }
 #endif // FEATURE_PREMORTEM_FINALIZATION
 
+#ifdef USE_REGIONS
         if (!special_sweep_p)
+#endif //USE_REGIONS
         {
             clear_gen1_cards();
         }
@@ -31363,6 +31526,7 @@ void gc_heap::thread_final_regions (bool compact_p)
         else
         {
             start_region = get_free_region (gen_idx);
+            assert (start_region);
             thread_start_region (gen, start_region);
             dprintf (REGIONS_LOG, ("creating new gen%d at %Ix", gen_idx, heap_segment_mem (start_region)));
         }
@@ -31916,14 +32080,17 @@ void gc_heap::make_free_lists (int condemned_gen_number)
         size_t  end_brick = brick_of (end_address-1);
 
         int current_gen_num = i;
+#ifdef USE_REGIONS
         args.free_list_gen_number = (special_sweep_p ? current_gen_num : get_plan_gen_num (current_gen_num));
+#else
+        args.free_list_gen_number = get_plan_gen_num (current_gen_num);
+#endif //USE_REGIONS
         args.free_list_gen = generation_of (args.free_list_gen_number);
         args.highest_plug = 0;
 
 #ifdef USE_REGIONS
         dprintf (REGIONS_LOG, ("starting at gen%d %Ix -> %Ix", i, start_address, end_address));
 #else
-        assert (!special_sweep_p);
         args.current_gen_limit = (((current_gen_num == max_generation)) ?
                                   MAX_PTR :
                                   (generation_limit (args.free_list_gen_number)));
@@ -32015,8 +32182,6 @@ void gc_heap::make_free_lists (int condemned_gen_number)
         generation* gen_gen0 = generation_of (0);
         ephemeral_heap_segment = generation_start_segment (gen_gen0);
         alloc_allocated = heap_segment_allocated (ephemeral_heap_segment);
-        // Since we didn't compact, we should recalculate the end_gen0_region_space.
-        end_gen0_region_space = get_gen0_end_space();
 #else //USE_REGIONS
         int bottom_gen = 0;
         args.free_list_gen_number--;
@@ -34390,6 +34555,9 @@ BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
             dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
                                     start, end, lowest, highest));
             commit_flag = heap_segment_flags_ma_pcommitted;
+#ifdef USE_REGIONS
+            assert (!"Region should not have its mark array partially committed.");
+#endif
         }
 
         commit_start = max (lowest, start);
@@ -41225,7 +41393,6 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
 #ifdef USE_REGIONS
     if (special_sweep_p)
     {
-        last_gc_before_oom = FALSE;
         return FALSE;
     }
 #endif //USE_REGIONS
@@ -41242,7 +41409,9 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
     if ((condemned_gen_number == max_generation) && last_gc_before_oom)
     {
         should_compact = TRUE;
+#ifndef USE_REGIONS
         last_gc_before_oom = FALSE;
+#endif //!USE_REGIONS
         get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc);
     }
 
@@ -41448,9 +41617,36 @@ bool gc_heap::check_against_hard_limit (size_t space_required)
 }
 
 #ifdef USE_REGIONS
+bool gc_heap::sufficient_space_regions_for_allocation (size_t end_space, size_t end_space_required)
+{
+    // REGIONS PERF TODO: we can repurpose large regions here too, if needed.
+    size_t free_regions_space = (free_regions[basic_free_region].get_num_free_regions() * ((size_t)1 << min_segment_size_shr)) +
+                                global_region_allocator.get_free();
+    size_t total_alloc_space = end_space + free_regions_space;
+    dprintf (REGIONS_LOG, ("h%d required %Id, end %Id + free %Id=%Id",
+        heap_number, end_space_required, end_space, free_regions_space, total_alloc_space));
+    size_t total_commit_space = end_gen0_region_committed_space + free_regions[basic_free_region].get_size_committed_in_free();
+    if (total_alloc_space > end_space_required)
+    {
+        if (end_space_required > total_commit_space)
+        {
+            return check_against_hard_limit (end_space_required - total_commit_space);
+        }
+        else
+        {
+            return true;
+        }
+    }
+    else
+    {
+        return false;
+    }
+}
+
 bool gc_heap::sufficient_space_regions (size_t end_space, size_t end_space_required)
 {
     // REGIONS PERF TODO: we can repurpose large regions here too, if needed.
+    // REGIONS PERF TODO: for callsites other than allocation, we should also take commit into account
     size_t free_regions_space = (free_regions[basic_free_region].get_num_free_regions() * ((size_t)1 << min_segment_size_shr)) +
                                 global_region_allocator.get_free();
     size_t total_alloc_space = end_space + free_regions_space;
@@ -41461,7 +41657,9 @@ bool gc_heap::sufficient_space_regions (size_t end_space, size_t end_space_requi
         return check_against_hard_limit (end_space_required);
     }
     else
+    {
         return false;
+    }
 }
 #else //USE_REGIONS
 BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* committed, uint8_t* reserved, size_t end_space_required)
@@ -41636,7 +41834,7 @@ BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
         }
 
 #ifdef USE_REGIONS
-        size_t gen0_end_space = get_gen0_end_space();
+        size_t gen0_end_space = get_gen0_end_space (memory_type_reserved);
         BOOL can_fit = sufficient_space_regions (gen0_end_space, end_space);
 #else //USE_REGIONS
         BOOL can_fit = sufficient_space_end_seg (start, heap_segment_committed (ephemeral_heap_segment), heap_segment_reserved (ephemeral_heap_segment), end_space);
@@ -43740,16 +43938,21 @@ BOOL gc_heap::bgc_mark_array_range (heap_segment* seg,
     }
 }
 
-void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
+void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg, bool always_verify_p)
 {
 #ifdef _DEBUG
-    if (gc_heap::background_running_p())
+    if (gc_heap::background_running_p() || always_verify_p)
     {
         uint8_t* range_beg = 0;
         uint8_t* range_end = 0;
 
-        if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end))
+        if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end) || always_verify_p)
         {
+            if (always_verify_p)
+            {
+                range_beg = heap_segment_mem (seg);
+                range_end = heap_segment_reserved (seg);
+            }
             size_t  markw = mark_word_of (range_beg);
             size_t  markw_end = mark_word_of (range_end);
             while (markw < markw_end)
@@ -43770,7 +43973,7 @@ void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
             }
         }
     }
-#endif //VERIFY_HEAP
+#endif //_DEBUG
 }
 
 void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
index 0780124..74bba94 100644 (file)
@@ -429,6 +429,12 @@ const int recorded_committed_bucket_counts = recorded_committed_bookkeeping_buck
 
 gc_oh_num gen_to_oh (int gen);
 
+enum memory_type
+{
+    memory_type_reserved = 0,
+    memory_type_committed = 1
+};
+
 #if defined(TRACE_GC) && defined(BACKGROUND_GC)
 static const char * const str_bgc_state[] =
 {
@@ -1368,6 +1374,8 @@ public:
     PER_HEAP
     bool sufficient_space_regions (size_t end_space, size_t end_space_required);
     PER_HEAP
+    bool sufficient_space_regions_for_allocation (size_t end_space, size_t end_space_required);
+    PER_HEAP
     bool initial_make_soh_regions (gc_heap* hp);
     PER_HEAP
     bool initial_make_uoh_regions (int gen, gc_heap* hp);
@@ -1430,7 +1438,7 @@ public:
     PER_HEAP
     void get_gen0_end_plan_space();
     PER_HEAP
-    size_t get_gen0_end_space();
+    size_t get_gen0_end_space(memory_type type);
     PER_HEAP
     bool decide_on_compaction_space();
     PER_HEAP
@@ -2017,7 +2025,7 @@ protected:
     PER_HEAP_ISOLATED
     void init_heap_segment (heap_segment* seg, gc_heap* hp
 #ifdef USE_REGIONS
-                            , uint8_t* start, size_t size, int gen_num
+                            , uint8_t* start, size_t size, int gen_num, bool existing_region_p=false
 #endif //USE_REGIONS
                            );
     PER_HEAP
@@ -2525,7 +2533,7 @@ protected:
                                uint8_t** range_beg,
                                uint8_t** range_end);
     PER_HEAP
-    void bgc_verify_mark_array_cleared (heap_segment* seg);
+    void bgc_verify_mark_array_cleared (heap_segment* seg, bool always_verify_p = false);
     PER_HEAP
     void verify_mark_array_cleared();
     PER_HEAP
@@ -3679,6 +3687,10 @@ public:
     // sweep.
     size_t end_gen0_region_space;
 
+    PER_HEAP
+    // After GC we calculate this
+    size_t end_gen0_region_committed_space;
+
     // These are updated as we plan and will be used to make compaction
     // decision.
     PER_HEAP
@@ -3859,8 +3871,10 @@ public:
     PER_HEAP_ISOLATED
     VOLATILE(bool) full_gc_approach_event_set;
 
+#ifdef USE_REGIONS
     PER_HEAP
     bool special_sweep_p;
+#endif
 
 #ifdef BACKGROUND_GC
     PER_HEAP_ISOLATED
@@ -4099,6 +4113,12 @@ public:
     size_t heap_hard_limit_oh[total_oh_count];
 
     PER_HEAP_ISOLATED
+    size_t heap_hard_limit_for_heap;
+
+    PER_HEAP_ISOLATED
+    size_t heap_hard_limit_for_bookkeeping;
+
+    PER_HEAP_ISOLATED
     CLRCriticalSection check_commit_cs;
 
     PER_HEAP_ISOLATED
@@ -4224,7 +4244,7 @@ protected:
     PER_HEAP
     size_t      loh_pinned_queue_length;
 
-    PER_HEAP_ISOLATED
+    PER_HEAP
     int         loh_pinned_queue_decay;
 
     PER_HEAP