// gen2
{256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
// loh
- {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
+ {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0},
+ // poh
+ // TODO: tuning https://github.com/dotnet/runtime/issues/13739
+ {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0},
},
// latency_level_balanced
// gen2
{256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
// loh
+ {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0},
+ // poh
+ // TODO: tuning https://github.com/dotnet/runtime/issues/13739
{3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
},
};
size_t gc_heap::bgc_begin_loh_size = 0;
size_t gc_heap::end_loh_size = 0;
+size_t gc_heap::bgc_begin_poh_size = 0;
+size_t gc_heap::end_poh_size = 0;
#ifdef BGC_SERVO_TUNING
uint64_t gc_heap::loh_a_no_bgc = 0;
size_t gc_heap::bgc_loh_size_increased = 0;
+size_t gc_heap::bgc_poh_size_increased = 0;
+
size_t gc_heap::background_soh_alloc_count = 0;
size_t gc_heap::background_uoh_alloc_count = 0;
alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1];
alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1];
+alloc_list gc_heap::poh_alloc_list [NUM_POH_ALIST-1];
dynamic_data gc_heap::dynamic_data_table [total_generation_count];
gc_history_per_heap gc_heap::gc_data_per_heap;
VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_soh;
VOLATILE(bool) gc_heap::card_mark_done_soh;
VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_loh;
+VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_poh;
VOLATILE(bool) gc_heap::card_mark_done_uoh;
#endif // FEATURE_CARD_MARKING_STEALING
imemory_data *initial_memory;
imemory_data *initial_normal_heap; // points into initial_memory_array
imemory_data *initial_large_heap; // points into initial_memory_array
+ imemory_data *initial_pinned_heap; // points into initial_memory_array
size_t block_size_normal;
size_t block_size_large;
+ size_t block_size_pinned;
int block_count; // # of blocks in each
int current_block_normal;
int current_block_large;
+ int current_block_pinned;
enum
{
{
case 0: return block_size_normal;
case 1: return block_size_large;
+ case 2: return block_size_pinned;
default: __UNREACHABLE();
}
};
case soh_gen1:
case soh_gen2: return initial_normal_heap[h_number].memory_base;
case loh_generation: return initial_large_heap[h_number].memory_base;
+ case poh_generation: return initial_pinned_heap[h_number].memory_base;
default: __UNREACHABLE();
}
};
case soh_gen1:
case soh_gen2: return block_size_normal;
case loh_generation: return block_size_large;
+ case poh_generation: return block_size_pinned;
default: __UNREACHABLE();
}
};
initial_memory_details memory_details;
-BOOL reserve_initial_memory (size_t normal_size, size_t large_size, int num_heaps, bool use_large_pages_p)
+BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p)
{
BOOL reserve_success = FALSE;
// should only be called once
assert (memory_details.initial_memory == 0);
- // soh + loh segments * num_heaps
- memory_details.initial_memory = new (nothrow) imemory_data[num_heaps * (total_generation_count - ephemeral_generation_count)];
+ // soh + loh + poh segments * num_heaps
+ memory_details.initial_memory = new (nothrow) imemory_data[num_heaps * (total_generation_count - ephemeral_generation_count)];
if (memory_details.initial_memory == 0)
{
dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps * (total_generation_count - ephemeral_generation_count) * sizeof (imemory_data)));
memory_details.initial_normal_heap = memory_details.initial_memory;
memory_details.initial_large_heap = memory_details.initial_normal_heap + num_heaps;
+ memory_details.initial_pinned_heap = memory_details.initial_large_heap + num_heaps;
memory_details.block_size_normal = normal_size;
memory_details.block_size_large = large_size;
+ memory_details.block_size_pinned = pinned_size;
memory_details.block_count = num_heaps;
memory_details.current_block_normal = 0;
memory_details.current_block_large = 0;
+ memory_details.current_block_pinned = 0;
g_gc_lowest_address = MAX_PTR;
g_gc_highest_address = 0;
return FALSE;
}
- if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size))
+ if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size + pinned_size))
{
dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count));
return FALSE;
}
- size_t requestedMemory = memory_details.block_count * (normal_size + large_size);
+ size_t requestedMemory = memory_details.block_count * (normal_size + large_size + pinned_size);
uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory, use_large_pages_p);
if (allatonce_block)
(i * normal_size);
memory_details.initial_large_heap[i].memory_base = allatonce_block +
(memory_details.block_count * normal_size) + (i * large_size);
+ memory_details.initial_pinned_heap[i].memory_base = allatonce_block +
+ (memory_details.block_count * (normal_size + large_size)) + (i * pinned_size);
reserve_success = TRUE;
}
}
else
{
- // try to allocate 2 blocks
+ // try to allocate 3 blocks
uint8_t* b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size, use_large_pages_p);
uint8_t* b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size, use_large_pages_p);
+ uint8_t* b3 = (uint8_t*)virtual_alloc (memory_details.block_count * pinned_size, use_large_pages_p);
- if (b1 && b2)
+ if (b1 && b2 && b3)
{
memory_details.allocation_pattern = initial_memory_details::EACH_GENERATION;
- g_gc_lowest_address = min (b1, b2);
+ g_gc_lowest_address = min (b1, min(b2, b3));
g_gc_highest_address = max (b1 + memory_details.block_count * normal_size,
- b2 + memory_details.block_count * large_size);
+ max (b2 + memory_details.block_count * large_size,
+ b3 + memory_details.block_count * pinned_size));
for (int i = 0; i < memory_details.block_count; i++)
{
memory_details.initial_normal_heap[i].memory_base = b1 + (i * normal_size);
memory_details.initial_large_heap[i].memory_base = b2 + (i * large_size);
+ memory_details.initial_pinned_heap[i].memory_base = b3 + (i * pinned_size);
}
reserve_success = TRUE;
virtual_free (b1, memory_details.block_count * normal_size);
if (b2)
virtual_free (b2, memory_details.block_count * large_size);
+ if (b3)
+ virtual_free (b3, memory_details.block_count * pinned_size);
}
if ((b2 == NULL) && (memory_details.block_count > 1))
virtual_free (memory_details.initial_large_heap[0].memory_base,
memory_details.block_count*memory_details.block_size_large);
+
+ virtual_free (memory_details.initial_pinned_heap[0].memory_base,
+ memory_details.block_count*memory_details.block_size_pinned);
}
else
{
assert (memory_details.allocation_pattern == initial_memory_details::EACH_BLOCK);
imemory_data *current_block = memory_details.initial_memory;
- for(int i = 0; i < (memory_details.block_count*2); i++, current_block++)
+ for (int i = 0; i < (memory_details.block_count*(total_generation_count - ephemeral_generation_count)); i++, current_block++)
{
size_t block_size = memory_details.block_size (i);
if (current_block->memory_base != NULL)
memory_details.initial_memory = NULL;
memory_details.initial_normal_heap = NULL;
memory_details.initial_large_heap = NULL;
+ memory_details.initial_pinned_heap = NULL;
}
}
#ifdef MULTIPLE_HEAPS
heap_segment_heap (res) = hp;
#endif //MULTIPLE_HEAPS
- res->flags |= heap_segment_flags_loh;
+ res->flags |= gen_number == poh_generation ?
+ heap_segment_flags_poh :
+ heap_segment_flags_loh;
FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), gc_etw_segment_large_object_heap);
{
UNREFERENCED_PARAMETER(for_gc_p);
- for(int i = uoh_start_generation; i < total_generation_count; i++)
+ for (int i = uoh_start_generation; i < total_generation_count; i++)
{
#ifdef _DEBUG
alloc_context* acontext =
// We don't need to go through all the card tables here because
// we only need to copy from the GC version of the mark array - when we
- // mark (even in allocate_large_object) we always use that mark array.
+ // mark (even in allocate_uoh_object) we always use that mark array.
if ((card_table_highest_address (old_ct) >= start) &&
(card_table_lowest_address (old_ct) <= end))
{
}
HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
- size_t loh_segment_size
+ size_t loh_segment_size,
+ size_t poh_segment_size
#ifdef MULTIPLE_HEAPS
,int number_of_heaps
#endif //MULTIPLE_HEAPS
#endif //BACKGROUND_GC
reserved_memory = 0;
- size_t initial_heap_size = soh_segment_size + loh_segment_size;
+ size_t initial_heap_size = soh_segment_size + loh_segment_size + poh_segment_size;
#ifdef MULTIPLE_HEAPS
reserved_memory_limit = initial_heap_size * number_of_heaps;
#else //MULTIPLE_HEAPS
check_commit_cs.Initialize();
}
- if (!reserve_initial_memory (soh_segment_size, loh_segment_size, number_of_heaps, use_large_pages_p))
+ if (!reserve_initial_memory (soh_segment_size, loh_segment_size, poh_segment_size, number_of_heaps, use_large_pages_p))
return E_OUTOFMEMORY;
#ifdef CARD_BUNDLE
#endif //!SEG_MAPPING_TABLE
- // Create segments for the large generation
+ // Create segments for the large and pinned generations
heap_segment* lseg = make_initial_segment(loh_generation, h_number);
if (!lseg)
return 0;
(size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
gc_etw_segment_large_object_heap);
+ heap_segment* pseg = make_initial_segment(poh_generation, h_number);
+ if (!pseg)
+ return 0;
+
+ pseg->flags |= heap_segment_flags_poh;
+
+ FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(pseg),
+ (size_t)(heap_segment_reserved (pseg) - heap_segment_mem(pseg)),
+ gc_etw_segment_pinned_object_heap);
+
#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (lseg, __this);
+ seg_mapping_table_add_segment (pseg, __this);
#else //SEG_MAPPING_TABLE
seg_table->insert ((uint8_t*)lseg, sdelta);
+ seg_table->insert ((uint8_t*)pseg, sdelta);
#endif //SEG_MAPPING_TABLE
make_generation (loh_generation, lseg, heap_segment_mem (lseg), 0);
+ make_generation (poh_generation, pseg, heap_segment_mem (pseg), 0);
heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;
+ heap_segment_allocated (pseg) = heap_segment_mem (pseg) + Align (min_obj_size, get_alignment_constant (FALSE));
+ heap_segment_used (pseg) = heap_segment_allocated (pseg) - plug_skew;
+
generation_of (max_generation)->free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST, gen2_alloc_list);
generation_of (loh_generation)->free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST, loh_alloc_list);
+ generation_of (poh_generation)->free_list_allocator = allocator(NUM_POH_ALIST, BASE_POH_ALIST, poh_alloc_list);
for (int gen_num = 0; gen_num < total_generation_count; gen_num++)
{
#ifdef MULTIPLE_HEAPS
heap_segment_heap (lseg) = this;
+ heap_segment_heap (pseg) = this;
//initialize the alloc context heap
generation_alloc_context (generation_of (soh_gen0))->set_alloc_heap(vm_heap);
generation_alloc_context (generation_of (loh_generation))->set_alloc_heap(vm_heap);
+ generation_alloc_context (generation_of (poh_generation))->set_alloc_heap(vm_heap);
#endif //MULTIPLE_HEAPS
background_uoh_alloc_count = 0;
bgc_overflow_count = 0;
end_loh_size = dd_min_size (dynamic_data_of (loh_generation));
+ end_poh_size = dd_min_size (dynamic_data_of (poh_generation));
#endif //BACKGROUND_GC
#ifdef GC_CONFIG_DRIVEN
return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size);
}
+// POH counterpart of bgc_loh_allocate_spin: gathers the pinned-object-heap
+// background-GC tracking values and delegates to the shared bgc_allocate_spin
+// helper. The caller (UOH allocation path) selects this when gen_number ==
+// poh_generation and treats a return value >= 0 as "spin for that amount"
+// before retrying the allocation — see the `spin_for_allocation >= 0` check
+// at the call site. NOTE(review): exact return-value semantics live in
+// bgc_allocate_spin (not visible in this hunk) — confirm there.
+int gc_heap::bgc_poh_allocate_spin()
+{
+ // Minimum POH size from its dynamic data; used as the spin threshold.
+ size_t min_gc_size = dd_min_size (dynamic_data_of (poh_generation));
+ // POH size when the current background GC began.
+ size_t bgc_begin_size = bgc_begin_poh_size;
+ // How much the POH has grown since the background GC began.
+ size_t bgc_size_increased = bgc_poh_size_increased;
+ // POH size recorded at the end of the previous background GC.
+ size_t end_size = end_poh_size;
+
+ return bgc_allocate_spin(min_gc_size, bgc_begin_size, bgc_size_increased, end_size);
+}
#endif //BACKGROUND_GC
size_t gc_heap::get_uoh_seg_size (size_t size)
}
#endif //BGC_SERVO_TUNING
- int spin_for_allocation = bgc_loh_allocate_spin();
+ int spin_for_allocation = (gen_number == loh_generation) ?
+ bgc_loh_allocate_spin() :
+ bgc_poh_allocate_spin();
if (spin_for_allocation >= 0)
{
for (int i = 0; i < n_heaps; i++)
{
if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) ||
- ((g_heaps[i]->current_generation_size (loh_generation)) > bgc_min_per_heap))
+ ((g_heaps[i]->current_generation_size (loh_generation)) > bgc_min_per_heap) ||
+ ((g_heaps[i]->current_generation_size (poh_generation)) > bgc_min_per_heap))
{
bgc_heap_too_small = FALSE;
break;
}
#else //MULTIPLE_HEAPS
if ((current_generation_size (max_generation) > bgc_min_per_heap) ||
- (current_generation_size (loh_generation) > bgc_min_per_heap))
+ (current_generation_size (loh_generation) > bgc_min_per_heap) ||
+ (current_generation_size (poh_generation) > bgc_min_per_heap))
{
bgc_heap_too_small = FALSE;
}
if (i == max_generation)
{
dd_collection_count (dynamic_data_of (loh_generation))++;
+ dd_collection_count(dynamic_data_of(poh_generation))++;
}
dd_gc_clock (dd) = dd_gc_clock (dd0);
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
{
dprintf (3, ("Marking cross generation pointers for uoh objects on heap %d", heap_number));
- for(int i = uoh_start_generation; i < total_generation_count; i++)
+ for (int i = uoh_start_generation; i < total_generation_count; i++)
{
- mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG);
+#ifndef ALLOW_REFERENCES_IN_POH
+ if (i != poh_generation)
+#endif //ALLOW_REFERENCES_IN_POH
+ mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG);
}
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
if (!hp->card_mark_done_uoh)
{
dprintf(3, ("Marking cross generation pointers for large objects on heap %d", hp->heap_number));
- for(int i = uoh_start_generation; i < total_generation_count; i++)
+ for (int i = uoh_start_generation; i < total_generation_count; i++)
{
- hp->mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG);
+#ifndef ALLOW_REFERENCES_IN_POH
+ if (i != poh_generation)
+#endif //ALLOW_REFERENCES_IN_POH
+ hp->mark_through_cards_for_uoh_objects(mark_object_fn, i, FALSE THIS_ARG);
}
hp->card_mark_done_uoh = true;
GCToEEInterface::DiagWalkLOHSurvivors(__this);
sweep_uoh_objects (loh_generation);
}
+
+ sweep_uoh_objects (poh_generation);
}
else
{
if (!card_mark_done_uoh)
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
{
- dprintf (3, ("Relocating cross generation pointers for large objects on heap %d", heap_number));
- mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, loh_generation, TRUE THIS_ARG);
+ dprintf (3, ("Relocating cross generation pointers for uoh objects on heap %d", heap_number));
+ for (int i = uoh_start_generation; i < total_generation_count; i++)
+ {
+#ifndef ALLOW_REFERENCES_IN_POH
+ if (i != poh_generation)
+#endif //ALLOW_REFERENCES_IN_POH
+ mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, i, TRUE THIS_ARG);
+ }
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
card_mark_done_uoh = true;
{
relocate_in_uoh_objects (loh_generation);
}
+
+#ifdef ALLOW_REFERENCES_IN_POH
+ relocate_in_uoh_objects (poh_generation);
+#endif
}
#ifndef FEATURE_CARD_MARKING_STEALING
// moved this code *before* we scan the older generations via mark_through_cards_xxx
if (!hp->card_mark_done_uoh)
{
- dprintf(3, ("Relocating cross generation pointers for large objects on heap %d", hp->heap_number));
- hp->mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, loh_generation, TRUE THIS_ARG);
+ dprintf(3, ("Relocating cross generation pointers for uoh objects on heap %d", hp->heap_number));
+ for (int i = uoh_start_generation; i < total_generation_count; i++)
+ {
+#ifndef ALLOW_REFERENCES_IN_POH
+ if (i != poh_generation)
+#endif //ALLOW_REFERENCES_IN_POH
+ hp->mark_through_cards_for_uoh_objects(&gc_heap::relocate_address, i, TRUE THIS_ARG);
+ }
hp->card_mark_done_uoh = true;
}
}
size_t total_soh_size = generation_sizes (generation_of (max_generation));
size_t total_loh_size = generation_size (loh_generation);
+ size_t total_poh_size = generation_size (poh_generation);
bgc_begin_loh_size = total_loh_size;
+ bgc_begin_poh_size = total_poh_size;
bgc_loh_size_increased = 0;
+ bgc_poh_size_increased = 0;
- dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
+ dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size));
{
//concurrent_print_time_delta ("copying stack roots");
total_soh_size = generation_sizes (generation_of (max_generation));
total_loh_size = generation_size (loh_generation);
+ total_poh_size = generation_size (poh_generation);
- dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
+ dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size));
dprintf (2, ("nonconcurrent marking stack roots"));
GCScan::GcScanRoots(background_promote,
gen0_bricks_cleared = FALSE;
- dprintf (2, ("end of bgc mark: loh: %d, soh: %d",
+ dprintf (2, ("end of bgc mark: loh: %d, poh: %d, soh: %d",
generation_size (loh_generation),
+ generation_size (poh_generation),
generation_sizes (generation_of (max_generation))));
for (int gen_idx = max_generation; gen_idx < total_generation_count; gen_idx++)
bool reset_watch_state = !!concurrent_p;
bool is_runtime_suspended = !concurrent_p;
BOOL small_object_segments = TRUE;
- for(int i = max_generation; i < total_generation_count; i++)
+ for (int i = max_generation; i < total_generation_count; i++)
{
heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (i)));
PREFIX_ASSUME(seg != NULL);
#ifdef BACKGROUND_GC
if (i == loh_generation)
end_loh_size = total_gen_size;
+
+ if (i == poh_generation)
+ end_poh_size = total_gen_size;
+
#endif //BACKGROUND_GC
//update counter
dd_promoted_size (dd) = out;
}
}
-CObjectHeader* gc_heap::allocate_large_object (size_t jsize, uint32_t flags, int64_t& alloc_bytes)
+CObjectHeader* gc_heap::allocate_uoh_object (size_t jsize, uint32_t flags, int gen_number, int64_t& alloc_bytes)
{
//create a new alloc context because gen3context is shared.
alloc_context acontext;
size_t size = AlignQword (jsize);
int align_const = get_alignment_constant (FALSE);
-#ifdef FEATURE_LOH_COMPACTION
- size_t pad = Align (loh_padding_obj_size, align_const);
-#else
size_t pad = 0;
+#ifdef FEATURE_LOH_COMPACTION
+ if (gen_number == loh_generation)
+ {
+ pad = Align (loh_padding_obj_size, align_const);
+ }
#endif //FEATURE_LOH_COMPACTION
assert (size >= Align (min_obj_size, align_const));
#ifdef _MSC_VER
#pragma inline_depth(0)
#endif //_MSC_VER
- if (! allocate_more_space (&acontext, (size + pad), flags, loh_generation))
+ if (! allocate_more_space (&acontext, (size + pad), flags, gen_number))
{
return 0;
}
mark_array_clear_marked (result);
}
#ifdef BACKGROUND_GC
- //the object has to cover one full mark uint32_t
- assert (size >= mark_word_size);
if (current_c_gc_state != c_gc_state_free)
{
dprintf (3, ("Concurrent allocation of a large object %Ix",
size_t total_soh_size = generation_sizes (generation_of (max_generation));
size_t total_loh_size = generation_size (loh_generation);
+ size_t total_poh_size = generation_size (poh_generation);
- dprintf (GTC_LOG, ("loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
+ dprintf (GTC_LOG, ("h%d: S: poh: %Id, loh: %Id, soh: %Id", heap_number, total_poh_size, total_loh_size, total_soh_size));
dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id",
generation_free_list_space (generation_of (max_generation)),
generation_free_list_space (generation_of (loh_generation)),
generation_free_obj_space (generation_of (loh_generation))));
+ dprintf (GTC_LOG, ("h%d: end of bgc sweep: poh FL: %Id, FO: %Id",
+ heap_number,
+ generation_free_list_space (generation_of (poh_generation)),
+ generation_free_obj_space (generation_of (poh_generation))));
+
FIRE_EVENT(BGC2ndConEnd);
concurrent_print_time_delta ("background sweep");
size_t total_cards_cleared = 0;
#ifdef FEATURE_CARD_MARKING_STEALING
- VOLATILE(uint32_t)* chunk_index = (VOLATILE(uint32_t)*) & card_mark_chunk_index_loh;
+ VOLATILE(uint32_t)* chunk_index = (VOLATILE(uint32_t)*) &(gen_num == loh_generation ?
+ card_mark_chunk_index_loh :
+ card_mark_chunk_index_poh);
card_marking_enumerator card_mark_enumerator(seg, low, chunk_index);
card_word_end = 0;
BOOL bad_ref_p = FALSE;
BOOL free_ref_p = FALSE;
- for(int i = max_generation; i < total_generation_count; i++)
+ for (int i = max_generation; i < total_generation_count; i++)
{
generation* gen = generation_of (i);
int align_const = get_alignment_constant (i == max_generation);
#ifdef MULTIPLE_HEAPS
gc_heap::n_heaps = nhp;
- hr = gc_heap::initialize_gc (seg_size, large_seg_size /*loh_segment_size*/, nhp);
+ // TODO: tuning https://github.com/dotnet/runtime/issues/13739
+ hr = gc_heap::initialize_gc (seg_size, large_seg_size /*loh_segment_size*/, large_seg_size /*poh_segment_size*/, nhp);
#else
- hr = gc_heap::initialize_gc (seg_size, large_seg_size /*loh_segment_size*/);
+ // TODO: tuning https://github.com/dotnet/runtime/issues/13739
+ hr = gc_heap::initialize_gc (seg_size, large_seg_size /*loh_segment_size*/, large_seg_size /*poh_segment_size*/);
#endif //MULTIPLE_HEAPS
if (hr != S_OK)
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
if (GCStress<cfg_any>::IsEnabled()) {
- for(int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++)
+ for (int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++)
{
m_StressObjs[i] = CreateGlobalHandle(0);
}
alloc_context* acontext = generation_alloc_context (hp->generation_of (loh_generation));
- newAlloc = (Object*) hp->allocate_large_object (size, flags, acontext->alloc_bytes_uoh);
+ newAlloc = (Object*) hp->allocate_uoh_object (size, flags, loh_generation, acontext->alloc_bytes_uoh);
ASSERT(((size_t)newAlloc & 7) == 0);
}
#endif //MULTIPLE_HEAPS
alloc_context* acontext = generation_alloc_context (hp->generation_of (loh_generation));
- newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_uoh);
+ newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, loh_generation, acontext->alloc_bytes_uoh);
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
}
else
{
- newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_uoh);
+ newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, loh_generation, acontext->alloc_bytes_uoh);
#ifdef FEATURE_STRUCTALIGN
newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN