}
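+// Returns TRUE when the memory load recorded at the start of this GC is at or
+// above high_memory_load_th, or when the OS has raised its low-memory notification.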
inline BOOL
+gc_heap::dt_high_memory_load_p()
+{
+    return ((settings.entry_memory_load >= high_memory_load_th) || g_low_memory_status);
+}
+
+inline BOOL
in_range_for_segment(uint8_t* add, heap_segment* seg)
{
return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
settings.gc_index, current_bgc_state,
seg_deleted);
- if (settings.entry_memory_load >= high_memory_load_th || g_low_memory_status)
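+        // Only decommit the mark array for this region when memory load is high.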
+ if (dt_high_memory_load_p())
{
decommit_mark_array_by_seg (region);
}
uint8_t* new_committed)
{
#ifdef USE_REGIONS
- if (settings.entry_memory_load < high_memory_load_th && !g_low_memory_status)
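+    // Regions build: skip decommitting pages unless memory load is high.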
+ if (!dt_high_memory_load_p())
{
return 0;
}
void gc_heap::decommit_heap_segment (heap_segment* seg)
{
#ifdef USE_REGIONS
- if (settings.entry_memory_load < high_memory_load_th && !g_low_memory_status)
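+    // Regions build: keep the segment committed unless memory load is high.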
+ if (!dt_high_memory_load_p())
{
return;
}
size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start;
// Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail
// on write watched memory.
- if (reset_mm_p && gc_heap::g_low_memory_status)
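+        // High memory load is now treated the same as the OS low-memory
+        // notification here; dt_high_memory_load_p() covers both conditions.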
+ if (reset_mm_p && gc_heap::dt_high_memory_load_p())
{
#ifdef MULTIPLE_HEAPS
bool unlock_p = true;
PER_HEAP_ISOLATED
size_t exponential_smoothing (int gen, size_t collection_count, size_t desired_per_heap);
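+    // TRUE when entry_memory_load is at or above high_memory_load_th, or when
+    // the OS low-memory notification (g_low_memory_status) is set.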
+ PER_HEAP_ISOLATED
+ BOOL dt_high_memory_load_p();
+
protected:
PER_HEAP
void update_collection_counts ();