// If a field does not fit any of the above categories, such as fgn_maxgen_percent which is only updated by an API,
// it will be marked as PER_HEAP_FIELD/PER_HEAP_ISOLATED_FIELD.
//
-// A couple of notes -
-//
+// A few notes -
+//
+// + within the section for a particular category of fields I use the following policy to list them -
+// I group the ones that are for the same purpose together, ie, without empty lines in between them.
+// I list the common ones first, ie, they apply regardless of defines. Then I list the defines in the order of
+// #ifdef MULTIPLE_HEAPS
+// #ifdef BACKGROUND_GC
+// #ifdef USE_REGIONS
+// other defines checks
+//
// + some of the fields are used by both regions and segments. When that's the case, the annotation
-// is based on regions. So for segments they may or may not apply. Segments code is only in maintainence mode and
-// we are not investing actively in it.
+// is based on regions. So for segments they may or may not apply (segments code is in maintenance mode only).
//
// + some fields are used by the GC and WB but not by the allocator, in which case I will indicate them as such.
#ifdef MULTIPLE_HEAPS
class seg_free_spaces;
class gc_heap;
+#define youngest_generation (generation_of (0))
+#define large_object_generation (generation_of (loh_generation))
+#define pinned_object_generation (generation_of (poh_generation))
+
#ifdef BACKGROUND_GC
class exclusive_sync;
class recursive_gc_sync;
friend struct ::alloc_context;
friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
+#ifdef FEATURE_64BIT_ALIGNMENT
+ friend Object* AllocAlign8(alloc_context* acontext, gc_heap* hp, size_t size, uint32_t flags);
+#endif //FEATURE_64BIT_ALIGNMENT
friend class t_join;
friend class gc_mechanisms;
friend class seg_free_spaces;
+ friend class mark;
+ friend class CObjectHeader;
#ifdef BACKGROUND_GC
friend class exclusive_sync;
#endif
public:
+ // table mapping region number to generation
+ // there are actually two generation numbers per entry:
+ // - the region's current generation
+ // - the region's planned generation, i.e. after the GC
+ // and there are flags
+ // - whether the region is sweep in plan
+ // - and whether the region is demoted
+ enum region_info : uint8_t
+ {
+ // lowest 2 bits are current generation number
+ RI_GEN_0 = 0x0,
+ RI_GEN_1 = 0x1,
+ RI_GEN_2 = 0x2,
+ RI_GEN_MASK = 0x3,
+
+ // we have 4 bits available for flags, of which 2 are used
+ RI_SIP = 0x4,
+ RI_DEMOTED = 0x8,
+
+ // top 2 bits are planned generation number
+ RI_PLAN_GEN_SHR = 0x6, // how much to shift the value right to obtain plan gen
+ RI_PLAN_GEN_0 = 0x00,
+ RI_PLAN_GEN_1 = 0x40,
+ RI_PLAN_GEN_2 = 0x80,
+ RI_PLAN_GEN_MASK = 0xC0,
+ };
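+
+ // A minimal decoding sketch (illustrative only, not part of this header),
+ // assuming a region_info value ri read from the map_region_to_generation table:
+ //
+ //   int  current_gen = ri & RI_GEN_MASK;                           // lowest 2 bits
+ //   int  planned_gen = (ri & RI_PLAN_GEN_MASK) >> RI_PLAN_GEN_SHR; // top 2 bits
+ //   bool sip_p       = (ri & RI_SIP) != 0;
+ //   bool demoted_p   = (ri & RI_DEMOTED) != 0;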
+
+private:
#ifdef TRACE_GC
PER_HEAP_METHOD void print_free_list (int gen, heap_segment* seg);
#endif // TRACE_GC
#ifdef SYNCHRONIZATION_STATS
-
PER_HEAP_ISOLATED_METHOD void init_sync_stats()
{
#ifdef MULTIPLE_HEAPS
print_heap_sync_stats(0, gc_count_during_log);
#endif //MULTIPLE_HEAPS
}
-
#endif //SYNCHRONIZATION_STATS
PER_HEAP_METHOD void verify_soh_segment_list();
#endif //STRESS_REGIONS
#endif //USE_REGIONS
- PER_HEAP_ISOLATED_METHOD heap_segment* make_heap_segment (uint8_t* new_pages,
- size_t size,
- gc_heap* hp,
- int gen_num);
-
PER_HEAP_ISOLATED_METHOD gc_heap* make_gc_heap(
#if defined (MULTIPLE_HEAPS)
GCHeap* vm_heap,
PER_HEAP_ISOLATED_METHOD void add_to_history();
#ifdef BGC_SERVO_TUNING
- PER_HEAP_ISOLATED_METHOD void check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size);
- PER_HEAP_ISOLATED_METHOD void get_and_reset_loh_alloc_info();
-#endif //BGC_SERVO_TUNING
+ // Currently BGC servo tuning is an experimental feature.
+ class bgc_tuning
+ {
+ public:
+ struct tuning_calculation
+ {
+ // We use this virtual size that represents the generation
+ // size at goal. We calculate the flr based on this.
+ size_t end_gen_size_goal;
-#ifndef USE_REGIONS
- PER_HEAP_METHOD BOOL expand_soh_with_minimal_gc();
-#endif //!USE_REGIONS
+ // sweep goal is expressed as flr as we want to avoid
+ // expanding the gen size.
+ double sweep_flr_goal;
- // EE is always suspended when this method is called.
- // returning FALSE means we actually didn't do a GC. This happens
- // when we figured that we needed to do a BGC.
- PER_HEAP_METHOD void garbage_collect (int n);
+ // gen2 size at the end of last bgc.
+ size_t last_bgc_size;
- // Since we don't want to waste a join just to do this, I am doing
- // doing this at the last join in gc1.
- PER_HEAP_ISOLATED_METHOD void pm_full_gc_init_or_clear();
+ //
+ // these need to be double so we don't lose too much accuracy
+ // they are *100.0
+ //
+ // the FL ratio at the start of current bgc sweep.
+ double current_bgc_sweep_flr;
+ // the FL ratio at the end of last bgc.
+ // Only used for FF.
+ double last_bgc_flr;
+ // the FL ratio last time we started a bgc
+ double current_bgc_start_flr;
- // This does a GC when pm_trigger_full_gc is set
- PER_HEAP_METHOD void garbage_collect_pm_full_gc();
+ double above_goal_accu_error;
- PER_HEAP_ISOLATED_METHOD bool is_pm_ratio_exceeded();
+ // We will trigger the next BGC if this much
+ // alloc has been consumed between the last
+ // bgc end and now.
+ size_t alloc_to_trigger;
+ // actual consumed alloc
+ size_t actual_alloc_to_trigger;
- PER_HEAP_METHOD void init_records();
+ // the alloc between last bgc sweep start and end.
+ size_t last_bgc_end_alloc;
- PER_HEAP_ISOLATED_METHOD uint32_t* make_card_table (uint8_t* start, uint8_t* end);
+ //
+ // For smoothing calc
+ //
+ size_t smoothed_alloc_to_trigger;
- PER_HEAP_ISOLATED_METHOD void get_card_table_element_layout (uint8_t* start, uint8_t* end, size_t layout[total_bookkeeping_elements + 1]);
+ //
+ // For TBH
+ //
+ // last time we checked, were we above sweep flr goal?
+ bool last_sweep_above_p;
+ size_t alloc_to_trigger_0;
- PER_HEAP_ISOLATED_METHOD void get_card_table_element_sizes (uint8_t* start, uint8_t* end, size_t bookkeeping_sizes[total_bookkeeping_elements]);
+ // This is to get us started. It's set when we observe in a gen1
+ // GC that the memory load is high enough and is used to seed the first
+ // BGC triggered due to this tuning.
+ size_t first_alloc_to_trigger;
+ };
- PER_HEAP_ISOLATED_METHOD void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
+ struct tuning_stats
+ {
+ size_t last_bgc_physical_size;
-#ifdef USE_REGIONS
- PER_HEAP_ISOLATED_METHOD bool on_used_changed (uint8_t* left);
+ size_t last_alloc_end_to_start;
+ size_t last_alloc_start_to_sweep;
+ size_t last_alloc_sweep_to_end;
+ // records the alloc at the last significant point,
+ // used to calculate the 3 allocs above.
+ // It's reset at bgc sweep start as that's when we reset
+ // all the allocation data (sweep_allocated/condemned_allocated/etc)
+ size_t last_alloc;
- PER_HEAP_ISOLATED_METHOD bool inplace_commit_card_table (uint8_t* from, uint8_t* to);
-#else //USE_REGIONS
- PER_HEAP_ISOLATED_METHOD int grow_brick_card_tables (uint8_t* start,
- uint8_t* end,
- size_t size,
- heap_segment* new_seg,
- gc_heap* hp,
- BOOL loh_p);
-#endif //USE_REGIONS
+ // the FL size at the end of last bgc.
+ size_t last_bgc_fl_size;
- PER_HEAP_ISOLATED_METHOD BOOL is_mark_set (uint8_t* o);
+ // last gen2 surv rate
+ double last_bgc_surv_rate;
-#ifdef FEATURE_BASICFREEZE
- PER_HEAP_ISOLATED_METHOD bool frozen_object_p(Object* obj);
-#endif // FEATURE_BASICFREEZE
+ // the FL ratio last time gen size increased.
+ double last_gen_increase_flr;
+ };
-protected:
- PER_HEAP_ISOLATED_METHOD BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p, bool separated_poh_p, uint16_t* heap_no_to_numa_node);
+ // This is just so that I don't need to calculate things multiple
+ // times. Only used during bgc end calculations. Everything that
+ // needs to be preserved across GCs will be saved in the other 2
+ // structs.
+ struct bgc_size_data
+ {
+ size_t gen_size;
+ size_t gen_physical_size;
+ size_t gen_fl_size;
+ // The actual physical fl size, unadjusted
+ size_t gen_actual_phys_fl_size;
+ // I call this physical_fl but really it's adjusted based on alloc
+ // that we haven't consumed because the other generation consumed
+ // its alloc and triggered the BGC. See init_bgc_end_data.
+ // We don't allow it to go negative.
+ ptrdiff_t gen_physical_fl_size;
+ double gen_physical_flr;
+ double gen_flr;
+ };
- PER_HEAP_ISOLATED_METHOD void destroy_initial_memory();
+ static bool enable_fl_tuning;
+ // the memory load we aim to maintain.
+ static uint32_t memory_load_goal;
- PER_HEAP_ISOLATED_METHOD void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ // if we are BGCMemGoalSlack above BGCMemGoal, this is where we
+ // panic and start to see if we should do NGC2.
+ static uint32_t memory_load_goal_slack;
+ // This is calculated based on memory_load_goal.
+ static uint64_t available_memory_goal;
+ // If we are above (ml goal + slack), we need to panic.
+ // Currently we just trigger the next GC as an NGC2, but
+ // we do track the accumulated error and could be more
+ // sophisticated about triggering NGC2 especially when
+ // slack is small. We could say we only actually trigger an NGC2
+ // when we see the error is large enough.
+ static bool panic_activated_p;
+ static double accu_error_panic;
- PER_HEAP_METHOD void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ static double above_goal_kp;
+ static double above_goal_ki;
+ static bool enable_ki;
+ static bool enable_kd;
+ static bool enable_smooth;
+ static bool enable_tbh;
+ static bool enable_ff;
+ static bool enable_gradual_d;
+ static double above_goal_kd;
+ static double above_goal_ff;
+ static double num_gen1s_smooth_factor;
- struct walk_relocate_args
- {
- uint8_t* last_plug;
- BOOL is_shortened;
- mark* pinned_plug_entry;
- void* profiling_context;
- record_surv_fn fn;
- };
+ // for ML servo loop
+ static double ml_kp;
+ static double ml_ki;
- PER_HEAP_METHOD void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
+ // for ML loop ki
+ static double accu_error;
- PER_HEAP_METHOD void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
- walk_relocate_args* args);
+ // did we start tuning with FL yet?
+ static bool fl_tuning_triggered;
- PER_HEAP_METHOD void walk_relocation (void* profiling_context, record_surv_fn fn);
-#ifdef USE_REGIONS
- PER_HEAP_METHOD heap_segment* walk_relocation_sip (heap_segment* current_heap_segment, void* profiling_context, record_surv_fn fn);
-#endif // USE_REGIONS
- PER_HEAP_METHOD void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
+ // ==================================================
+ // ============what's used in calculation============
+ // ==================================================
+ //
+ // only used in smoothing.
+ static size_t num_bgcs_since_tuning_trigger;
- PER_HEAP_METHOD void walk_finalize_queue (fq_walk_fn fn);
+ // gen1 GC setting the next GC as a BGC when it observes the
+ // memory load is high enough for the first time.
+ static bool next_bgc_p;
-#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
- PER_HEAP_METHOD void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
-#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+ // this is organized as:
+ // element 0 is for max_generation
+ // element 1 is for max_generation+1
+ static tuning_calculation gen_calc[2];
- // used in blocking GCs after plan phase so this walks the plugs.
- PER_HEAP_METHOD void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
- PER_HEAP_METHOD void walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number);
+ // ======================================================
+ // ============what's used to only show stats============
+ // ======================================================
+ //
+ // how many gen1's actually happened before triggering next bgc.
+ static size_t actual_num_gen1s_to_trigger;
- PER_HEAP_METHOD int generation_to_condemn (int n,
- BOOL* blocking_collection_p,
- BOOL* elevation_requested_p,
- BOOL check_only_p);
+ static size_t gen1_index_last_bgc_end;
+ static size_t gen1_index_last_bgc_start;
+ static size_t gen1_index_last_bgc_sweep;
- PER_HEAP_ISOLATED_METHOD int joined_generation_to_condemn (BOOL should_evaluate_elevation,
- int initial_gen,
- int current_gen,
- BOOL* blocking_collection
- STRESS_HEAP_ARG(int n_original));
+ static tuning_stats gen_stats[2];
+ // ============end of stats============
- PER_HEAP_METHOD size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
+ static bgc_size_data current_bgc_end_data[2];
- PER_HEAP_ISOLATED_METHOD uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
+ static size_t last_stepping_bgc_count;
+ static uint32_t last_stepping_mem_load;
+ static uint32_t stepping_interval;
- PER_HEAP_METHOD void concurrent_print_time_delta (const char* msg);
- PER_HEAP_METHOD void free_list_info (int gen_num, const char* msg);
+ // When we are in the initial stage before fl tuning is triggered.
+ static bool use_stepping_trigger_p;
- // in svr GC on entry and exit of this method, the GC threads are not
+ // the gen2 correction factor is used to put more emphasis
+ // on the gen2 when it triggered the BGC.
+ // If the BGC was triggered due to gen3, we decrease this
+ // factor.
+ static double gen2_ratio_correction;
+ static double ratio_correction_step;
+
+ // Since we have 2 loops, this BGC was caused by one of them; for the other loop we know
+ // we didn't reach the goal so use the output from last time.
+ static void calculate_tuning(int gen_number, bool use_this_loop_p);
+
+ static void init_bgc_end_data(int gen_number, bool use_this_loop_p);
+ static void calc_end_bgc_fl(int gen_number);
+
+ static void convert_to_fl(bool use_gen2_loop_p, bool use_gen3_loop_p);
+ static double calculate_ml_tuning(uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki);
+
+ // This invokes the ml tuning loop and sets the total gen sizes, ie
+ // including vfl.
+ static void set_total_gen_sizes(bool use_gen2_loop_p, bool use_gen3_loop_p);
+
+ static bool should_trigger_bgc_loh();
+
+ // This is only called when we've already stopped for GC.
+ // For LOH we'd be doing this in the alloc path.
+ static bool should_trigger_bgc();
+
+ // If we keep being above ml goal, we need to compact.
+ static bool should_trigger_ngc2();
+
+ // Only implemented for gen2 now while we are in sweep.
+ // Until we can build up enough fl, we delay gen1 consuming
+ // gen2 alloc so we don't get into panic.
+ // When we maintain the fl instead of building a new one, this
+ // can be eliminated.
+ static bool should_delay_alloc(int gen_number);
+
+ // When we are under the memory load goal, we'd like to do 10 BGCs
+ // before we reach the goal.
+ static bool stepping_trigger(uint32_t current_memory_load, size_t current_gen2_count);
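+ // (Illustrative reading of the stepping idea above, not the actual computation:
+ // divide the remaining headroom into ~10 steps, eg
+ //   stepping_interval = max ((memory_load_goal - current_memory_load) / 10, 1);
+ // and trigger a BGC each time the memory load crosses the next step.)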
+
+ static void update_bgc_start(int gen_number, size_t num_gen1s_since_end);
+ // Updates the following:
+ // current_bgc_start_flr
+ // actual_alloc_to_trigger
+ // last_alloc_end_to_start
+ // last_alloc
+ // actual_num_gen1s_to_trigger
+ // gen1_index_last_bgc_start
+ static void record_bgc_start();
+
+ static void update_bgc_sweep_start(int gen_number, size_t num_gen1s_since_start);
+ // Updates the following:
+ // current_bgc_sweep_flr
+ // last_alloc_start_to_sweep
+ // last_alloc
+ // gen1_index_last_bgc_sweep
+ static void record_bgc_sweep_start();
+ // Updates the rest
+ static void record_and_adjust_bgc_end();
+ };
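+
+ // Illustrative sketch of the PI form the ML servo loop above presumably uses
+ // (an assumption based on the ml_kp/ml_ki/accu_error fields; the real
+ // calculate_ml_tuning also handles reduce_p and reports the kp/ki parts
+ // separately via _vfl_from_kp/_vfl_from_ki):
+ //
+ //   error       = (ptrdiff_t)(available_memory_goal - current_available_physical);
+ //   accu_error += error;                             // integral term
+ //   adjustment  = ml_kp * error + ml_ki * accu_error;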
+
+ PER_HEAP_ISOLATED_METHOD void check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size);
+ PER_HEAP_ISOLATED_METHOD void get_and_reset_loh_alloc_info();
+#endif //BGC_SERVO_TUNING
+
+#ifndef USE_REGIONS
+ PER_HEAP_METHOD BOOL expand_soh_with_minimal_gc();
+#endif //!USE_REGIONS
+
+ // EE is always suspended when this method is called.
+ // returning FALSE means we actually didn't do a GC. This happens
+ // when we figured that we needed to do a BGC.
+ PER_HEAP_METHOD void garbage_collect (int n);
+
+ // Since we don't want to waste a join just to do this, I am doing
+ // this at the last join in gc1.
+ PER_HEAP_ISOLATED_METHOD void pm_full_gc_init_or_clear();
+
+ // This does a GC when pm_trigger_full_gc is set
+ PER_HEAP_METHOD void garbage_collect_pm_full_gc();
+
+ PER_HEAP_ISOLATED_METHOD bool is_pm_ratio_exceeded();
+
+ PER_HEAP_METHOD void init_records();
+
+ PER_HEAP_ISOLATED_METHOD uint32_t* make_card_table (uint8_t* start, uint8_t* end);
+
+ PER_HEAP_ISOLATED_METHOD void get_card_table_element_layout (uint8_t* start, uint8_t* end, size_t layout[total_bookkeeping_elements + 1]);
+
+ PER_HEAP_ISOLATED_METHOD void get_card_table_element_sizes (uint8_t* start, uint8_t* end, size_t bookkeeping_sizes[total_bookkeeping_elements]);
+
+ PER_HEAP_ISOLATED_METHOD void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p);
+
+#ifdef USE_REGIONS
+ PER_HEAP_ISOLATED_METHOD bool on_used_changed (uint8_t* left);
+
+ PER_HEAP_ISOLATED_METHOD bool inplace_commit_card_table (uint8_t* from, uint8_t* to);
+#else //USE_REGIONS
+ PER_HEAP_ISOLATED_METHOD int grow_brick_card_tables (uint8_t* start,
+ uint8_t* end,
+ size_t size,
+ heap_segment* new_seg,
+ gc_heap* hp,
+ BOOL loh_p);
+#endif //USE_REGIONS
+
+ PER_HEAP_ISOLATED_METHOD BOOL is_mark_set (uint8_t* o);
+
+ PER_HEAP_ISOLATED_METHOD BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t pinned_size, int num_heaps, bool use_large_pages_p, bool separated_poh_p, uint16_t* heap_no_to_numa_node);
+
+ PER_HEAP_ISOLATED_METHOD void destroy_initial_memory();
+
+ PER_HEAP_ISOLATED_METHOD void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+
+ PER_HEAP_METHOD void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+
+ struct walk_relocate_args
+ {
+ uint8_t* last_plug;
+ BOOL is_shortened;
+ mark* pinned_plug_entry;
+ void* profiling_context;
+ record_surv_fn fn;
+ };
+
+ PER_HEAP_METHOD void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type);
+
+ PER_HEAP_METHOD void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
+ walk_relocate_args* args);
+
+ PER_HEAP_METHOD void walk_relocation (void* profiling_context, record_surv_fn fn);
+#ifdef USE_REGIONS
+ PER_HEAP_METHOD heap_segment* walk_relocation_sip (heap_segment* current_heap_segment, void* profiling_context, record_surv_fn fn);
+#endif // USE_REGIONS
+ PER_HEAP_METHOD void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
+
+ PER_HEAP_METHOD void walk_finalize_queue (fq_walk_fn fn);
+
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+ PER_HEAP_METHOD void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn);
+#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+
+ // used in blocking GCs after plan phase so this walks the plugs.
+ PER_HEAP_METHOD void walk_survivors_relocation (void* profiling_context, record_surv_fn fn);
+ PER_HEAP_METHOD void walk_survivors_for_uoh (void* profiling_context, record_surv_fn fn, int gen_number);
+
+ PER_HEAP_METHOD int generation_to_condemn (int n,
+ BOOL* blocking_collection_p,
+ BOOL* elevation_requested_p,
+ BOOL check_only_p);
+
+ PER_HEAP_ISOLATED_METHOD int joined_generation_to_condemn (BOOL should_evaluate_elevation,
+ int initial_gen,
+ int current_gen,
+ BOOL* blocking_collection
+ STRESS_HEAP_ARG(int n_original));
+
+ PER_HEAP_METHOD size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
+
+ PER_HEAP_ISOLATED_METHOD uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
+
+ PER_HEAP_METHOD void concurrent_print_time_delta (const char* msg);
+ PER_HEAP_METHOD void free_list_info (int gen_num, const char* msg);
+
+ // in svr GC on entry and exit of this method, the GC threads are not
// synchronized
PER_HEAP_METHOD void gc1();
PER_HEAP_METHOD BOOL bgc_poh_allocate_spin();
#endif //BACKGROUND_GC
-#define max_saved_spinlock_info 48
-
-#ifdef SPINLOCK_HISTORY
- PER_HEAP_FIELD_DIAG_ONLY int spinlock_info_index;
-
- PER_HEAP_FIELD_DIAG_ONLY spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
-#endif //SPINLOCK_HISTORY
-
PER_HEAP_METHOD void add_saved_spinlock_info (
bool loh_p,
msl_enter_state enter_state,
int align_const);
#ifdef RECORD_LOH_STATE
- #define max_saved_loh_states 12
- PER_HEAP_FIELD_DIAG_ONLY int loh_state_index;
-
- struct loh_state_info
- {
- allocation_state alloc_state;
- EEThreadId thread_id;
- };
-
- PER_HEAP_FIELD_DIAG_ONLY loh_state_info last_loh_states[max_saved_loh_states];
PER_HEAP_METHOD void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
#endif //RECORD_LOH_STATE
PER_HEAP_METHOD allocation_state allocate_uoh (int gen_number,
PER_HEAP_ISOLATED_METHOD bool virtual_commit (void* address, size_t size, int bucket, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1);
PER_HEAP_ISOLATED_METHOD void virtual_free (void* add, size_t size, heap_segment* sg=NULL);
+ PER_HEAP_ISOLATED_METHOD void reset_memory(uint8_t* o, size_t sizeo);
PER_HEAP_METHOD void clear_gen0_bricks();
PER_HEAP_METHOD void check_gen0_bricks();
#ifdef BACKGROUND_GC
PER_HEAP_METHOD void set_background_overflow_p (uint8_t* oo);
#endif
-#ifdef BGC_SERVO_TUNING
-
- // Currently BGC servo tuning is an experimental feature.
- class bgc_tuning
- {
- public:
- struct tuning_calculation
- {
- // We use this virtual size that represents the generation
- // size at goal. We calculate the flr based on this.
- size_t end_gen_size_goal;
+#endif //BACKGROUND_GC
- // sweep goal is expressed as flr as we want to avoid
- // expanding the gen size.
- double sweep_flr_goal;
+ PER_HEAP_METHOD void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
+ PER_HEAP_METHOD BOOL process_mark_overflow (int condemned_gen_number);
+ PER_HEAP_METHOD void process_mark_overflow_internal (int condemned_gen_number,
+ uint8_t* min_address, uint8_t* max_address);
- // gen2 size at the end of last bgc.
- size_t last_bgc_size;
+#ifdef SNOOP_STATS
+ PER_HEAP_METHOD void print_snoop_stat();
+#endif //SNOOP_STATS
- //
- // these need to be double so we don't loose so much accurancy
- // they are *100.0
- //
- // the FL ratio at the start of current bgc sweep.
- double current_bgc_sweep_flr;
- // the FL ratio at the end of last bgc.
- // Only used for FF.
- double last_bgc_flr;
- // the FL ratio last time we started a bgc
- double current_bgc_start_flr;
+#ifdef MH_SC_MARK
+ PER_HEAP_METHOD BOOL check_next_mark_stack (gc_heap* next_heap);
+#endif //MH_SC_MARK
- double above_goal_accu_error;
+ PER_HEAP_METHOD void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
- // We will trigger the next BGC if this much
- // alloc has been consumed between the last
- // bgc end and now.
- size_t alloc_to_trigger;
- // actual consumed alloc
- size_t actual_alloc_to_trigger;
+ PER_HEAP_METHOD size_t get_generation_start_size (int gen_number);
- // the alloc between last bgc sweep start and end.
- size_t last_bgc_end_alloc;
+ PER_HEAP_ISOLATED_METHOD int get_num_heaps();
- //
- // For smoothing calc
- //
- size_t smoothed_alloc_to_trigger;
+ PER_HEAP_METHOD BOOL decide_on_promotion_surv (size_t threshold);
- //
- // For TBH
- //
- // last time we checked, were we above sweep flr goal?
- bool last_sweep_above_p;
- size_t alloc_to_trigger_0;
+ PER_HEAP_METHOD void mark_phase (int condemned_gen_number, BOOL mark_only_p);
- // This is to get us started. It's set when we observe in a gen1
- // GC when the memory load is high enough and is used to seed the first
- // BGC triggered due to this tuning.
- size_t first_alloc_to_trigger;
- };
+ PER_HEAP_METHOD void pin_object (uint8_t* o, uint8_t** ppObject);
- struct tuning_stats
- {
- size_t last_bgc_physical_size;
+ PER_HEAP_ISOLATED_METHOD size_t get_total_pinned_objects();
- size_t last_alloc_end_to_start;
- size_t last_alloc_start_to_sweep;
- size_t last_alloc_sweep_to_end;
- // records the alloc at the last significant point,
- // used to calculate the 3 alloc's above.
- // It's reset at bgc sweep start as that's when we reset
- // all the allocation data (sweep_allocated/condemned_allocated/etc)
- size_t last_alloc;
-
- // the FL size at the end of last bgc.
- size_t last_bgc_fl_size;
-
- // last gen2 surv rate
- double last_bgc_surv_rate;
-
- // the FL ratio last time gen size increased.
- double last_gen_increase_flr;
- };
-
- // This is just so that I don't need to calculate things multiple
- // times. Only used during bgc end calculations. Everything that
- // needs to be perserved across GCs will be saved in the other 2
- // structs.
- struct bgc_size_data
- {
- size_t gen_size;
- size_t gen_physical_size;
- size_t gen_fl_size;
- // The actual physical fl size, unadjusted
- size_t gen_actual_phys_fl_size;
- // I call this physical_fl but really it's adjusted based on alloc
- // that we haven't consumed because the other generation consumed
- // its alloc and triggered the BGC. See init_bgc_end_data.
- // We don't allow it to go negative.
- ptrdiff_t gen_physical_fl_size;
- double gen_physical_flr;
- double gen_flr;
- };
-
- static bool enable_fl_tuning;
- // the memory load we aim to maintain.
- static uint32_t memory_load_goal;
-
- // if we are BGCMemGoalSlack above BGCMemGoal, this is where we
- // panic and start to see if we should do NGC2.
- static uint32_t memory_load_goal_slack;
- // This is calculated based on memory_load_goal.
- static uint64_t available_memory_goal;
- // If we are above (ml goal + slack), we need to panic.
- // Currently we just trigger the next GC as an NGC2, but
- // we do track the accumulated error and could be more
- // sophisticated about triggering NGC2 especially when
- // slack is small. We could say unless we see the error
- // is large enough would we actually trigger an NGC2.
- static bool panic_activated_p;
- static double accu_error_panic;
-
- static double above_goal_kp;
- static double above_goal_ki;
- static bool enable_ki;
- static bool enable_kd;
- static bool enable_smooth;
- static bool enable_tbh;
- static bool enable_ff;
- static bool enable_gradual_d;
- static double above_goal_kd;
- static double above_goal_ff;
- static double num_gen1s_smooth_factor;
-
- // for ML servo loop
- static double ml_kp;
- static double ml_ki;
-
- // for ML loop ki
- static double accu_error;
-
- // did we start tuning with FL yet?
- static bool fl_tuning_triggered;
-
- // ==================================================
- // ============what's used in calculation============
- // ==================================================
- //
- // only used in smoothing.
- static size_t num_bgcs_since_tuning_trigger;
-
- // gen1 GC setting the next GC as a BGC when it observes the
- // memory load is high enough for the first time.
- static bool next_bgc_p;
-
- // this is organized as:
- // element 0 is for max_generation
- // element 1 is for max_generation+1
- static tuning_calculation gen_calc[2];
-
- // ======================================================
- // ============what's used to only show stats============
- // ======================================================
- //
- // how many gen1's actually happened before triggering next bgc.
- static size_t actual_num_gen1s_to_trigger;
-
- static size_t gen1_index_last_bgc_end;
- static size_t gen1_index_last_bgc_start;
- static size_t gen1_index_last_bgc_sweep;
-
- static tuning_stats gen_stats[2];
- // ============end of stats============
-
- static bgc_size_data current_bgc_end_data[2];
-
- static size_t last_stepping_bgc_count;
- static uint32_t last_stepping_mem_load;
- static uint32_t stepping_interval;
-
- // When we are in the initial stage before fl tuning is triggered.
- static bool use_stepping_trigger_p;
-
- // the gen2 correction factor is used to put more emphasis
- // on the gen2 when it triggered the BGC.
- // If the BGC was triggered due to gen3, we decrease this
- // factor.
- static double gen2_ratio_correction;
- static double ratio_correction_step;
-
- // Since we have 2 loops, this BGC was caused by one of them; for the other loop we know
- // we didn't reach the goal so use the output from last time.
- static void calculate_tuning (int gen_number, bool use_this_loop_p);
-
- static void init_bgc_end_data (int gen_number, bool use_this_loop_p);
- static void calc_end_bgc_fl (int gen_number);
-
- static void convert_to_fl (bool use_gen2_loop_p, bool use_gen3_loop_p);
- static double calculate_ml_tuning (uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki);
-
- // This invokes the ml tuning loop and sets the total gen sizes, ie
- // including vfl.
- static void set_total_gen_sizes (bool use_gen2_loop_p, bool use_gen3_loop_p);
-
- static bool should_trigger_bgc_loh();
-
- // This is only called when we've already stopped for GC.
- // For LOH we'd be doing this in the alloc path.
- static bool should_trigger_bgc();
-
- // If we keep being above ml goal, we need to compact.
- static bool should_trigger_ngc2();
-
- // Only implemented for gen2 now while we are in sweep.
- // Before we could build up enough fl, we delay gen1 consuming
- // gen2 alloc so we don't get into panic.
- // When we maintain the fl instead of building a new one, this
- // can be eliminated.
- static bool should_delay_alloc (int gen_number);
-
- // When we are under the memory load goal, we'd like to do 10 BGCs
- // before we reach the goal.
- static bool stepping_trigger (uint32_t current_memory_load, size_t current_gen2_count);
-
- static void update_bgc_start (int gen_number, size_t num_gen1s_since_end);
- // Updates the following:
- // current_bgc_start_flr
- // actual_alloc_to_trigger
- // last_alloc_end_to_start
- // last_alloc
- // actual_num_gen1s_to_trigger
- // gen1_index_last_bgc_start
- static void record_bgc_start();
-
- static void update_bgc_sweep_start (int gen_number, size_t num_gen1s_since_start);
- // Updates the following:
- // current_bgc_sweep_flr
- // last_alloc_start_to_sweep
- // last_alloc
- // gen1_index_last_bgc_sweep
- static void record_bgc_sweep_start();
- // Updates the rest
- static void record_and_adjust_bgc_end();
- };
-
- // This tells us why we chose to do a bgc in tuning.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int saved_bgc_tuning_reason;
-#endif //BGC_SERVO_TUNING
-
-#endif //BACKGROUND_GC
-
- PER_HEAP_METHOD void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL);
- PER_HEAP_METHOD BOOL process_mark_overflow (int condemned_gen_number);
- PER_HEAP_METHOD void process_mark_overflow_internal (int condemned_gen_number,
- uint8_t* min_address, uint8_t* max_address);
-
-#ifdef SNOOP_STATS
- PER_HEAP_METHOD void print_snoop_stat();
-#endif //SNOOP_STATS
-
-#ifdef MH_SC_MARK
- PER_HEAP_METHOD BOOL check_next_mark_stack (gc_heap* next_heap);
-#endif //MH_SC_MARK
-
- PER_HEAP_METHOD void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p);
-
- PER_HEAP_METHOD size_t get_generation_start_size (int gen_number);
-
- PER_HEAP_ISOLATED_METHOD int get_num_heaps();
-
- PER_HEAP_METHOD BOOL decide_on_promotion_surv (size_t threshold);
-
- PER_HEAP_METHOD void mark_phase (int condemned_gen_number, BOOL mark_only_p);
-
- PER_HEAP_METHOD void pin_object (uint8_t* o, uint8_t** ppObject);
-
- PER_HEAP_ISOLATED_METHOD size_t get_total_pinned_objects();
-
- PER_HEAP_ISOLATED_METHOD void reinit_pinned_objects();
+ PER_HEAP_ISOLATED_METHOD void reinit_pinned_objects();
PER_HEAP_METHOD void reset_mark_stack ();
PER_HEAP_METHOD uint8_t* insert_node (uint8_t* new_node, size_t sequence_number,
PER_HEAP_METHOD void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len);
#endif //BACKGROUND_GC
- PER_HEAP_ISOLATED bool is_in_find_object_range (uint8_t* o);
+ PER_HEAP_ISOLATED_METHOD bool is_in_find_object_range (uint8_t* o);
#ifdef USE_REGIONS
PER_HEAP_ISOLATED_METHOD bool is_in_gc_range (uint8_t* o);
PER_HEAP_ISOLATED_METHOD void bgc_thread_stub (void* arg);
#endif //BACKGROUND_GC
-public:
+ PER_HEAP_METHOD void add_to_oom_history_per_heap();
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(bool) internal_gc_done;
+ PER_HEAP_METHOD void set_gc_done();
+ PER_HEAP_METHOD void reset_gc_done();
+ PER_HEAP_METHOD void enter_gc_done_event_lock();
+ PER_HEAP_METHOD void exit_gc_done_event_lock();
-#ifdef BACKGROUND_GC
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint32_t cm_in_progress;
+ PER_HEAP_ISOLATED_METHOD uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
- // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
- // we do right before the bgc starts.
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL dont_restart_ee_p;
+ PER_HEAP_ISOLATED_METHOD wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent bgc_start_event;
+#ifdef BACKGROUND_GC
+ PER_HEAP_ISOLATED_METHOD void add_bgc_pause_duration_0();
+ PER_HEAP_ISOLATED_METHOD last_recorded_gc_info* get_completed_bgc_info();
#endif //BACKGROUND_GC
- // Keeps track of the highest address allocated by Alloc
- // Used in allocator code path. Blocking GCs do use it at the beginning (to update heap_segment_allocated) and
- // at the end they get initialized for the allocator.
- PER_HEAP_FIELD_MAINTAINED_ALLOC uint8_t* alloc_allocated;
+#ifdef SYNCHRONIZATION_STATS
+ PER_HEAP_METHOD void init_heap_sync_stats()
+ {
+ good_suspension = 0;
+ bad_suspension = 0;
+ num_msl_acquired = 0;
+ total_msl_acquire = 0;
+ num_high_msl_acquire = 0;
+ num_low_msl_acquire = 0;
+ more_space_lock.init();
+ gc_lock.init();
+ }
- // For regions this is the region we currently allocate in. Set by a blocking GC at the end.
- PER_HEAP_FIELD_MAINTAINED_ALLOC heap_segment* ephemeral_heap_segment;
+ PER_HEAP_METHOD void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
+ {
+ printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
+ heap_num,
+ alloc_contexts_used,
+ good_suspension,
+ bad_suspension,
+ (unsigned int)(total_msl_acquire / gc_count_during_log),
+ num_high_msl_acquire / gc_count_during_log,
+ num_low_msl_acquire / gc_count_during_log,
+ num_msl_acquired / gc_count_during_log,
+ more_space_lock.num_switch_thread / gc_count_during_log,
+ more_space_lock.num_wait_longer / gc_count_during_log,
+ more_space_lock.num_switch_thread_w / gc_count_during_log,
+ more_space_lock.num_disable_preemptive_w / gc_count_during_log);
+ }
+#endif //SYNCHRONIZATION_STATS
- // Used by both the allocator (which adds entries to the queue) and the GC (moves entries on the queue)
- // The finalizer thread also removes entry from it.
- PER_HEAP_FIELD_MAINTAINED_ALLOC CFinalize* finalize_queue;
+#ifdef FEATURE_EVENT_TRACE
+ PER_HEAP_ISOLATED_METHOD void record_mark_time (uint64_t& mark_time,
+ uint64_t& current_mark_time,
+ uint64_t& last_mark_time);
- // This field is used by the !sos.AnalyzeOOM command and is updated by the last OOM GC sees.
- PER_HEAP_FIELD_DIAG_ONLY oom_history oom_info;
+ PER_HEAP_METHOD void init_bucket_info();
- // The following 4 fields are used by the !sos.DumpGCData command.
- PER_HEAP_FIELD_DIAG_ONLY size_t interesting_data_per_heap[max_idp_count];
- PER_HEAP_FIELD_DIAG_ONLY size_t compact_reasons_per_heap[max_compact_reasons_count];
- PER_HEAP_FIELD_DIAG_ONLY size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
- PER_HEAP_FIELD_DIAG_ONLY size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
+ PER_HEAP_METHOD void add_plug_in_condemned_info (generation* gen, size_t plug_size);
- // The following 3 fields are used in a single GC when we detect that the DAC notification has been
- // enabled to find roots for the !sos.FindRoots command.
- PER_HEAP_FIELD_DIAG_ONLY uint8_t** internal_root_array;
- PER_HEAP_FIELD_DIAG_ONLY size_t internal_root_array_index;
- PER_HEAP_FIELD_DIAG_ONLY BOOL heap_analyze_success;
+ PER_HEAP_METHOD void fire_etw_allocation_event (size_t allocation_amount,
+ int gen_number,
+ uint8_t* object_address,
+ size_t object_size);
- // The value of this array is set during init time and remains unchanged but the content
- // of each element is maintained during GCs.
- PER_HEAP_FIELD_MAINTAINED generation generation_table[total_generation_count];
+ PER_HEAP_METHOD void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
+
+#ifdef FEATURE_LOH_COMPACTION
+ PER_HEAP_METHOD void loh_reloc_survivor_helper (uint8_t** pval,
+ size_t& total_refs,
+ size_t& zero_refs);
+#endif //FEATURE_LOH_COMPACTION
+#endif //FEATURE_EVENT_TRACE
+
+ // dynamic tuning.
+ PER_HEAP_METHOD BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
+ // if elevate_p is FALSE, it means we are determining fragmentation for a generation
+ // to see if we should condemn this gen; otherwise it means we are determining if
+ // we should elevate to doing max_gen from an ephemeral gen.
+ PER_HEAP_METHOD BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
+ PER_HEAP_METHOD BOOL dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
+ PER_HEAP_METHOD BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
+ PER_HEAP_METHOD BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
+
+#ifdef FEATURE_CARD_MARKING_STEALING
+ PER_HEAP_METHOD void reset_card_marking_enumerators()
+ {
+ // set chunk index to all 1 bits so that incrementing it yields 0 as the first index
+ card_mark_chunk_index_soh = ~0;
+ card_mark_done_soh = false;
+
+ card_mark_chunk_index_loh = ~0;
+ card_mark_chunk_index_poh = ~0;
+ card_mark_done_uoh = false;
+ }
+
+ PER_HEAP_METHOD bool find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg,
+ size_t& n_card_set, uint8_t*& start_address, uint8_t*& limit,
+ size_t& card, size_t& end_card, size_t& card_word_end);
+#endif //FEATURE_CARD_MARKING_STEALING
+
+ PER_HEAP_ISOLATED_METHOD size_t exponential_smoothing (int gen, size_t collection_count, size_t desired_per_heap);
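+ // (A minimal sketch of a conventional smoothing step, assuming a hypothetical
+ // weight w in (0,1); the real method picks its own weights per generation:
+ //   smoothed = (size_t)(w * desired_per_heap + (1.0 - w) * prev_smoothed);)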
+
+ PER_HEAP_ISOLATED_METHOD BOOL dt_high_memory_load_p();
+
+ PER_HEAP_METHOD void update_collection_counts ();
+
+ /*****************************************************************************************************************/
+ // per heap fields //
+ /*****************************************************************************************************************/
+
+ /***********************************/
+ // PER_HEAP_FIELD_SINGLE_GC fields //
+ /***********************************/
+
+ // Used by a GC to wake up allocating threads waiting for GC to be done.
+ PER_HEAP_FIELD_SINGLE_GC GCEvent gc_done_event;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(int32_t) gc_done_event_lock;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(bool) gc_done_event_set;
+
+ PER_HEAP_FIELD_SINGLE_GC int condemned_generation_num;
+ PER_HEAP_FIELD_SINGLE_GC BOOL blocking_collection;
+ PER_HEAP_FIELD_SINGLE_GC BOOL elevation_requested;
+
+ PER_HEAP_FIELD_SINGLE_GC mark_queue_t mark_queue;
+
+ PER_HEAP_FIELD_SINGLE_GC int gc_policy; //sweep, compact, expand
+
+ PER_HEAP_FIELD_SINGLE_GC size_t total_promoted_bytes;
+ PER_HEAP_FIELD_SINGLE_GC size_t finalization_promoted_bytes;
+
+ PER_HEAP_FIELD_SINGLE_GC size_t mark_stack_tos;
+ PER_HEAP_FIELD_SINGLE_GC size_t mark_stack_bos;
+
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* oldest_pinned_plug;
+
+ PER_HEAP_FIELD_SINGLE_GC uint8_t** mark_list;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t** mark_list_end;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t** mark_list_index;
+
+ PER_HEAP_FIELD_SINGLE_GC uint8_t*** mark_list_piece_start;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t*** mark_list_piece_end;
+
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* min_overflow_address;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* max_overflow_address;
+
+ PER_HEAP_FIELD_SINGLE_GC size_t alloc_contexts_used;
+
+ // When we decide if we should expand the heap or not, we are
+ // fine NOT to expand if we find enough free space in gen0's free
+ // list or end of seg and we check this in decide_on_compacting.
+ // This is an expensive check so we just record the fact and don't
+ // need to check in the allocator again.
+ //
+ // Set during a GC and checked by allocator after that GC
+ PER_HEAP_FIELD_SINGLE_GC BOOL sufficient_gen0_space_p;
+
+ // TODO: should just get rid of this for regions.
+ PER_HEAP_FIELD_SINGLE_GC BOOL ro_segments_in_range;
+
+ PER_HEAP_FIELD_SINGLE_GC bool no_gc_oom_p;
+ PER_HEAP_FIELD_SINGLE_GC heap_segment* saved_loh_segment_no_gc;
+
+#ifdef MULTIPLE_HEAPS
+#ifndef USE_REGIONS
+ PER_HEAP_FIELD_SINGLE_GC heap_segment* new_heap_segment;
+#endif //!USE_REGIONS
+#else //MULTIPLE_HEAPS
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* shigh; //keeps track of the highest marked object
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* slow; //keeps track of the lowest marked object
+#endif //MULTIPLE_HEAPS
+
+#ifdef BACKGROUND_GC
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(bgc_state) current_bgc_state;
+
+ PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_loh_size;
+ PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_poh_size;
+ PER_HEAP_FIELD_SINGLE_GC size_t end_loh_size;
+ PER_HEAP_FIELD_SINGLE_GC size_t end_poh_size;
+
+ // We can't process the ephemeral range concurrently so we
+ // wait till final mark to process it.
+ PER_HEAP_FIELD_SINGLE_GC BOOL processed_eph_overflow_p;
+
+ // This is marked as SINGLE_GC, as in, it's initialized in each BGC and used throughout that BGC.
+ // But ephemeral GCs can happen during this BGC, so in that sense it can be used in multiple GCs.
+ PER_HEAP_FIELD_SINGLE_GC size_t c_mark_list_index;
+
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* next_sweep_obj;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* current_sweep_pos;
+#ifdef DOUBLY_LINKED_FL
+ PER_HEAP_FIELD_SINGLE_GC heap_segment* current_sweep_seg;
+#endif //DOUBLY_LINKED_FL
#ifdef USE_REGIONS
-#ifdef STRESS_REGIONS
- // TODO: could consider dynamically grow this.
- // Right now the way it works -
- // For each gen0 region, pin an object somewhere near the beginning and middle.
- // When we run out start replacing objects pinned by the earlier handles.
-#define PINNING_HANDLE_INITIAL_LENGTH 128
- PER_HEAP_FIELD_DIAG_ONLY OBJECTHANDLE* pinning_handles_for_alloc;
- PER_HEAP_FIELD_DIAG_ONLY int ph_index_per_heap;
- PER_HEAP_FIELD_DIAG_ONLY int pinning_seg_interval;
- PER_HEAP_FIELD_DIAG_ONLY size_t num_gen0_regions;
- PER_HEAP_FIELD_DIAG_ONLY int sip_seg_interval;
- PER_HEAP_FIELD_DIAG_ONLY int sip_seg_maxgen_interval;
- PER_HEAP_FIELD_DIAG_ONLY size_t num_condemned_regions;
-#endif //STRESS_REGIONS
+ PER_HEAP_FIELD_SINGLE_GC BOOL background_overflow_p;
+#else
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* background_min_overflow_address;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* background_max_overflow_address;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* background_min_soh_overflow_address;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* background_max_soh_overflow_address;
+ PER_HEAP_FIELD_SINGLE_GC heap_segment* saved_overflow_ephemeral_seg;
+ PER_HEAP_FIELD_SINGLE_GC heap_segment* saved_sweep_ephemeral_seg;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* saved_sweep_ephemeral_start;
- // This is updated during each GC and used by the allocator path to get more regions during allocation.
- PER_HEAP_FIELD_MAINTAINED_ALLOC region_free_list free_regions[count_free_region_kinds];
+ PER_HEAP_FIELD_SINGLE_GC size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
+ PER_HEAP_FIELD_SINGLE_GC size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
+ PER_HEAP_FIELD_SINGLE_GC size_t ordered_plug_indices[MAX_NUM_BUCKETS];
+ PER_HEAP_FIELD_SINGLE_GC size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
+ PER_HEAP_FIELD_SINGLE_GC BOOL ordered_plug_indices_init;
+ PER_HEAP_FIELD_SINGLE_GC BOOL use_bestfit;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* bestfit_first_pin;
+ PER_HEAP_FIELD_SINGLE_GC BOOL commit_end_of_seg;
+ PER_HEAP_FIELD_SINGLE_GC size_t max_free_space_items; // dynamically adjusted.
+ PER_HEAP_FIELD_SINGLE_GC size_t free_space_buckets;
+ PER_HEAP_FIELD_SINGLE_GC size_t free_space_items;
+ // -1 means we are using all the free
+ // spaces we have (not including
+ // end of seg space).
+ PER_HEAP_FIELD_SINGLE_GC int trimmed_free_space_index;
+ PER_HEAP_FIELD_SINGLE_GC size_t total_ephemeral_plugs;
+ PER_HEAP_FIELD_SINGLE_GC seg_free_spaces* bestfit_seg;
+ // Note: we know this from the plan phase.
+ // total_ephemeral_plugs actually has the same value
+ // but while we are calculating its value we also store
+ // info on how big the plugs are for best fit, which we
+ // don't do in the plan phase.
+ // TODO: get rid of total_ephemeral_plugs.
+ PER_HEAP_FIELD_SINGLE_GC size_t total_ephemeral_size;
+#endif //!USE_REGIONS
- // This is the number of regions we would free up if we sweep.
- // It's used in the decision for compaction so we calculate it in plan.
- PER_HEAP_FIELD_SINGLE_GC int num_regions_freed_in_sweep;
+#ifdef WRITE_WATCH
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* background_written_addresses[array_size + 2];
+#endif //WRITE_WATCH
- // Used in a single GC.
- PER_HEAP_FIELD_DIAG_ONLY int regions_per_gen[max_generation + 1];
+#ifdef SNOOP_STATS
+ PER_HEAP_FIELD_SINGLE_GC snoop_stats_data snoop_stat;
+#endif //SNOOP_STATS
+
+#ifdef BGC_SERVO_TUNING
+ PER_HEAP_FIELD_SINGLE_GC uint64_t loh_a_no_bgc;
+ PER_HEAP_FIELD_SINGLE_GC uint64_t loh_a_bgc_marking;
+ PER_HEAP_FIELD_SINGLE_GC uint64_t loh_a_bgc_planning;
+ PER_HEAP_FIELD_SINGLE_GC size_t bgc_maxgen_end_fl_size;
+#endif //BGC_SERVO_TUNING
+#endif //BACKGROUND_GC
+
+#ifdef USE_REGIONS
+ // This is the number of regions we would free up if we sweep.
+ // It's used in the decision for compaction so we calculate it in plan.
+ PER_HEAP_FIELD_SINGLE_GC int num_regions_freed_in_sweep;
PER_HEAP_FIELD_SINGLE_GC int sip_maxgen_regions_per_gen[max_generation + 1];
PER_HEAP_FIELD_SINGLE_GC heap_segment* reserved_free_regions_sip[max_generation];
PER_HEAP_FIELD_SINGLE_GC size_t gen0_pinned_free_space;
PER_HEAP_FIELD_SINGLE_GC bool gen0_large_chunk_found;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t regions_range;
-
// Each GC thread maintains its own record of survived/survived due to
// old gen cards pointing into that region. These allow us to make the
// following decisions -
PER_HEAP_FIELD_SINGLE_GC size_t* survived_per_region;
PER_HEAP_FIELD_SINGLE_GC size_t* old_card_survived_per_region;
- // Initialized in a blocking GC at the beginning of the mark phase
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t region_count;
+ PER_HEAP_FIELD_SINGLE_GC bool special_sweep_p;
- // table mapping region number to generation
- // there are actually two generation numbers per entry:
- // - the region's current generation
- // - the region's planned generation, i.e. after the GC
- // and there are flags
- // - whether the region is sweep in plan
- // - and whether the region is demoted
- enum region_info : uint8_t
- {
- // lowest 2 bits are current generation number
- RI_GEN_0 = 0x0,
- RI_GEN_1 = 0x1,
- RI_GEN_2 = 0x2,
- RI_GEN_MASK = 0x3,
+#else //USE_REGIONS
+ // Highest and lowest address for ephemeral generations.
+ // For regions these are global fields.
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* ephemeral_low;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* ephemeral_high;
- // we have 4 bits available for flags, of which 2 are used
- RI_SIP = 0x4,
- RI_DEMOTED = 0x8,
+ // For regions these are global fields
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* gc_low; // lowest address being condemned
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* gc_high; // highest address being condemned
- // top 2 bits are planned generation number
- RI_PLAN_GEN_SHR = 0x6, // how much to shift the value right to obtain plan gen
- RI_PLAN_GEN_0 = 0x00,
- RI_PLAN_GEN_1 = 0x40,
- RI_PLAN_GEN_2 = 0x80,
- RI_PLAN_GEN_MASK= 0xC0,
- };
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* demotion_low;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* demotion_high;
+ PER_HEAP_FIELD_SINGLE_GC BOOL demote_gen1_p;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* last_gen1_pin_end;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED region_info* map_region_to_generation;
- // same table as above, but skewed so that we can index
- // directly with address >> min_segment_size_shr
- // This is passed the write barrier code.
- PER_HEAP_ISOLATED_FIELD_MAINTAINED region_info* map_region_to_generation_skewed;
+ PER_HEAP_FIELD_SINGLE_GC BOOL ephemeral_promotion;
+ PER_HEAP_FIELD_SINGLE_GC uint8_t* saved_ephemeral_plan_start[ephemeral_generation_count];
+ PER_HEAP_FIELD_SINGLE_GC size_t saved_ephemeral_plan_start_size[ephemeral_generation_count];
#endif //USE_REGIONS
-#define max_oom_history_count 4
+#ifdef FEATURE_CARD_MARKING_STEALING
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(uint32_t) card_mark_chunk_index_soh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(bool) card_mark_done_soh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(uint32_t) card_mark_chunk_index_loh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(uint32_t) card_mark_chunk_index_poh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(bool) card_mark_done_uoh;
- PER_HEAP_FIELD_DIAG_ONLY int oomhist_index_per_heap;
- PER_HEAP_FIELD_DIAG_ONLY oom_history oomhist_per_heap[max_oom_history_count];
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_eph_soh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_gen_soh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_eph_loh;
+ PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_gen_loh;
+#endif //FEATURE_CARD_MARKING_STEALING
- PER_HEAP_METHOD void add_to_oom_history_per_heap();
+#ifdef DOUBLY_LINKED_FL
+ // For bucket 0 added list, we don't want to have to go through
+ // it to count how many bytes it has so we keep a record here.
+ // If we need to sweep in gen1, we discard this added list and
+ // need to deduct the size from free_list_space.
+ // Note that we should really move this and the free_list_space
+ // accounting into the alloc_list class.
+ PER_HEAP_FIELD_SINGLE_GC size_t gen2_removed_no_undo;
- PER_HEAP_ISOLATED_METHOD uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
+#define INVALID_SAVED_PINNED_PLUG_INDEX ((size_t)~0)
- // Returns TRUE if the current thread used to be in cooperative mode
- // before calling this function.
- PER_HEAP_ISOLATED_METHOD bool enable_preemptive ();
- PER_HEAP_ISOLATED_METHOD void disable_preemptive (bool restore_cooperative);
+ PER_HEAP_FIELD_SINGLE_GC size_t saved_pinned_plug_index;
+#endif //DOUBLY_LINKED_FL
- // Used by a GC to wait up allocating threads waiting for GC to be done.
- PER_HEAP_FIELD_SINGLE_GC GCEvent gc_done_event;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(int32_t) gc_done_event_lock;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(bool) gc_done_event_set;
+#ifdef FEATURE_LOH_COMPACTION
+ PER_HEAP_FIELD_SINGLE_GC size_t loh_pinned_queue_tos;
+ PER_HEAP_FIELD_SINGLE_GC size_t loh_pinned_queue_bos;
- PER_HEAP_METHOD void set_gc_done();
- PER_HEAP_METHOD void reset_gc_done();
- PER_HEAP_METHOD void enter_gc_done_event_lock();
- PER_HEAP_METHOD void exit_gc_done_event_lock();
+ // We may not compact LOH on every heap if we can't
+ // grow the pinned queue. This is to indicate whether
+ // this heap's LOH is compacted or not. So even if
+ // settings.loh_compaction is TRUE this may not be TRUE.
+ PER_HEAP_FIELD_SINGLE_GC BOOL loh_compacted_p;
+#endif //FEATURE_LOH_COMPACTION
- // Highest and lowest address for ephemeral generations.
-#ifdef USE_REGIONS
- // For regions these are only used during a GC (init-ed at beginning of mark and
- // used later in that GC).
- // They could be used for WB but we currently don't use them for that purpose, even
- // thought we do pass them to the WB code.
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(uint8_t*) ephemeral_low;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(uint8_t*) ephemeral_high;
-#else //!USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC uint8_t* ephemeral_low;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* ephemeral_high;
-#endif //!USE_REGIONS
+ /*****************************************/
+ // PER_HEAP_FIELD_SINGLE_GC_ALLOC fields //
+ /*****************************************/
+
+ // calculated at the end of a GC and used in allocator
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t allocation_quantum;
+
+ // TODO: actually a couple of entries in these elements are carried over from GC to GC -
+ // collect_count and previous_time_clock. It'd be nice to isolate these out.
+ // Only field used by allocation is new_allocation.
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC dynamic_data dynamic_data_table[total_generation_count];
+
+ // the # of bytes allocated since the last full compacting GC, maintained by the allocator and
+ // reset during the next full compacting GC
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC uint64_t loh_alloc_since_cg;
+
+ // if this is TRUE, we should always guarantee that we do a
+ // full compacting GC before we OOM.
+ // set by the allocator/GC and cleared during a full blocking GC
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC BOOL last_gc_before_oom;
+
+#ifdef MULTIPLE_HEAPS
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC VOLATILE(int) alloc_context_count;
- PER_HEAP_FIELD_MAINTAINED uint32_t* card_table;
- // In addition to being updated during GCs, this field is updated by the allocator code paths
- // and find_object
- PER_HEAP_FIELD_MAINTAINED_ALLOC short* brick_table;
+ // Init-ed during a GC and updated by allocator after that GC
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC bool gen0_allocated_after_gc_p;
+#endif //MULTIPLE_HEAPS
#ifdef BACKGROUND_GC
- // In addition to being updated during GCs, this field is also updated by the UOH allocator code paths
- PER_HEAP_FIELD_MAINTAINED_ALLOC uint32_t* mark_array;
+ // This includes what we allocate at the end of segment - allocating
+ // in free list doesn't increase the heap size.
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_loh_size_increased;
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_poh_size_increased;
+
+ // Updated by the allocator and reinit-ed in each BGC
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_soh_alloc_count;
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_uoh_alloc_count;
+
+ PER_HEAP_FIELD_SINGLE_GC_ALLOC VOLATILE(int32_t) uoh_alloc_thread_count;
#endif //BACKGROUND_GC
-#ifdef CARD_BUNDLE
- // In addition to being updated during GCs, this field is also updated by WB
- PER_HEAP_FIELD_MAINTAINED uint32_t* card_bundle_table;
-#endif //CARD_BUNDLE
+ /************************************/
+ // PER_HEAP_FIELD_MAINTAINED fields //
+ /************************************/
-#ifdef FEATURE_BASICFREEZE
- PER_HEAP_ISOLATED_FIELD_MAINTAINED sorted_table* seg_table;
-#endif //FEATURE_BASICFREEZE
+ // The value of this array is set during init time and remains unchanged but the content
+ // of each element is maintained during GCs.
+ PER_HEAP_FIELD_MAINTAINED generation generation_table[total_generation_count];
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(BOOL) gc_started;
+ // These are loosely maintained, ie, could be reinitialized at any GC if needed. All that's
+ // maintained is just the # of elements in mark_stack_array.
+ // The content of mark_stack_array is only maintained during a single GC.
+ PER_HEAP_FIELD_MAINTAINED size_t mark_stack_array_length;
+ PER_HEAP_FIELD_MAINTAINED mark* mark_stack_array;
- // The following 2 events are there to support the gen2 GC notification which is only fired if a full blocking GC
- // is about to happen
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC GCEvent full_gc_approach_event;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC GCEvent full_gc_end_event;
+ // This one is unusual - it's calculated in one GC and used in the next GC, so it's
+ // maintained, but only till the next GC.
+ // It only affects perf.
+ PER_HEAP_FIELD_MAINTAINED int generation_skip_ratio; // in %
- // Full GC Notification percentages. It's set by the RegisterForFullGCNotification API
- PER_HEAP_FIELD uint32_t fgn_maxgen_percent;
- PER_HEAP_ISOLATED_FIELD uint32_t fgn_loh_percent;
+ // This is also changed by find_object
+ // It only affects perf.
+ PER_HEAP_FIELD_MAINTAINED int gen0_must_clear_bricks;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(bool) full_gc_approach_event_set;
+ // This is maintained as BGC can indicate regions to add to it and the next blocking GC will thread
+ // these regions to this list. A blocking GC can also add to this list. Regions on this list will be
+ // freed later during that GC.
+ PER_HEAP_FIELD_MAINTAINED heap_segment* freeable_uoh_segment;
-#ifdef USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC bool special_sweep_p;
-#endif
+ // These *alloc_list fields are init-ed once and used throughout the process lifetime; they contain
+ // fields that are maintained via these generations' free_list_allocator. LOH/POH's alloc_lists are
+ // also used by the allocator so they are in the PER_HEAP_FIELD_MAINTAINED_ALLOC section.
+#define NUM_GEN2_ALIST (12)
+#ifdef HOST_64BIT
+ // bucket 0 contains sizes less than 256
+#define BASE_GEN2_ALIST_BITS (7)
+#else
+ // bucket 0 contains sizes less than 128
+#define BASE_GEN2_ALIST_BITS (6)
+#endif // HOST_64BIT
+ PER_HEAP_FIELD_MAINTAINED alloc_list gen2_alloc_list[NUM_GEN2_ALIST - 1];
#ifdef BACKGROUND_GC
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL fgn_last_gc_was_concurrent;
+ // Loosely maintained. Can change if the BGC thread times out and is re-created.
+ PER_HEAP_FIELD_MAINTAINED EEThreadId bgc_thread_id;
+ // Maintained and remains TRUE as long as the BGC thread doesn't time out
+ PER_HEAP_FIELD_MAINTAINED BOOL bgc_thread_running; // the BGC thread is in its main loop
+ // Maintained and remains the same as long as the BGC thread doesn't time out
+ PER_HEAP_FIELD_MAINTAINED Thread* bgc_thread;
+
+ // These are loosely maintained, same deal as mark_stack_array: the length is maintained
+ // but they can be reinit-ed if needed. Otherwise they are used in a single GC.
+ // TODO: note that we do reinit background_mark_stack_tos in grow_bgc_mark_stack but there's really
+ // no need because we always initialize it on entry of background_mark_simple1
+ PER_HEAP_FIELD_MAINTAINED uint8_t** background_mark_stack_tos;
+ PER_HEAP_FIELD_MAINTAINED uint8_t** background_mark_stack_array;
+ PER_HEAP_FIELD_MAINTAINED size_t background_mark_stack_array_length;
+
+ // Loosely maintained, can be reinit-ed in background_grow_c_mark_list.
+ // The content of c_mark_list is only maintained during a single BGC, c_mark_list_index is init-ed to 0
+ // at the beginning of a BGC.
+ PER_HEAP_FIELD_MAINTAINED uint8_t** c_mark_list;
+ PER_HEAP_FIELD_MAINTAINED size_t c_mark_list_length;
+
+ // This is maintained in the sense that a BGC will add to it and it will be changed
+ // during the first blocking GC that happens after this BGC is finished.
+ PER_HEAP_FIELD_MAINTAINED heap_segment* freeable_soh_segment;
#endif //BACKGROUND_GC
- PER_HEAP_FIELD_SINGLE_GC size_t fgn_last_alloc;
+#ifdef FEATURE_LOH_COMPACTION
+ // These are loosely maintained, ie, could be reinitialized at any GC if needed. All that's
+ // maintained is the # of elements in loh_pinned_queue, and when we should delete loh_pinned_queue
+ // if LOH compaction hasn't happened for a while.
+ // The content is only maintained during a single GC.
+ PER_HEAP_FIELD_MAINTAINED size_t loh_pinned_queue_length;
+ PER_HEAP_FIELD_MAINTAINED int loh_pinned_queue_decay;
+ PER_HEAP_FIELD_MAINTAINED mark* loh_pinned_queue;
+#endif //FEATURE_LOH_COMPACTION
- PER_HEAP_ISOLATED_METHOD uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE);
+ /******************************************/
+ // PER_HEAP_FIELD_MAINTAINED_ALLOC fields //
+ /******************************************/
- PER_HEAP_ISOLATED_METHOD wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms);
+ PER_HEAP_FIELD_MAINTAINED_ALLOC BOOL gen0_bricks_cleared;
-#ifndef USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC uint8_t* demotion_low;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* demotion_high;
- PER_HEAP_FIELD_SINGLE_GC BOOL demote_gen1_p;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* last_gen1_pin_end;
-#endif //!USE_REGIONS
+ // These *alloc_list fields are init-ed once and used throughout the process lifetime; they contain
+ // fields that are maintained via these generations' free_list_allocator. UOH's alloc_lists are also
+ // used by the allocator.
+#define NUM_LOH_ALIST (7)
+ // bucket 0 contains sizes less than 64*1024
+ // the "BITS" number here is the highest bit in 64*1024 - 1, zero-based as in BitScanReverse.
+ // see first_suitable_bucket(size_t size) for details.
+#define BASE_LOH_ALIST_BITS (15)
+ PER_HEAP_FIELD_MAINTAINED_ALLOC alloc_list loh_alloc_list[NUM_LOH_ALIST - 1];
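+
+ // Illustration (not part of the class): with BASE_LOH_ALIST_BITS = 15, bucket 0 holds
+ // sizes up to 64*1024 and every later bucket doubles the range. A hypothetical version
+ // of the bucket lookup, assuming BitScanReverse-style zero-based bit indices (see
+ // first_suitable_bucket for the real logic):
+ //
+ //   unsigned int loh_bucket_of (size_t size)
+ //   {
+ //       DWORD highest_bit = 0;
+ //       BitScanReverse64 (&highest_bit, (DWORD64)(size - 1));
+ //       int index = (int)highest_bit - BASE_LOH_ALIST_BITS;
+ //       return (unsigned int)min (max (index, 0), NUM_LOH_ALIST - 2);
+ //   }
+ //
+ // e.g. a 100*1024-byte free item: the highest bit of (100*1024 - 1) is 16, so bucket 1.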
- PER_HEAP_FIELD_DIAG_ONLY gen_to_condemn_tuning gen_to_condemn_reasons;
- PER_HEAP_FIELD_DIAG_ONLY size_t etw_allocation_running_amount[total_oh_count];
- PER_HEAP_FIELD_DIAG_ONLY uint64_t total_alloc_bytes_soh;
- PER_HEAP_FIELD_DIAG_ONLY uint64_t total_alloc_bytes_uoh;
+#define NUM_POH_ALIST (19)
+ // bucket 0 contains sizes less than 256
+#define BASE_POH_ALIST_BITS (7)
+ PER_HEAP_FIELD_MAINTAINED_ALLOC alloc_list poh_alloc_list[NUM_POH_ALIST - 1];
- PER_HEAP_FIELD_SINGLE_GC int gc_policy; //sweep, compact, expand
+ // Keeps track of the highest address allocated by Alloc.
+ // Used in the allocator code paths. Blocking GCs do use it at the beginning (to update
+ // heap_segment_allocated) and at the end it gets reinitialized for the allocator.
+ PER_HEAP_FIELD_MAINTAINED_ALLOC uint8_t* alloc_allocated;
-#ifdef MULTIPLE_HEAPS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool gc_thread_no_affinitize_p;
+ // For regions this is the region we currently allocate in. Set by a blocking GC at the end.
+ PER_HEAP_FIELD_MAINTAINED_ALLOC heap_segment* ephemeral_heap_segment;
- // These 2 fields' values do not change but are set/unset per GC
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent gc_start_event;
- PER_HEAP_ISOLATED_FIELD GCEvent ee_suspend_event;
+ // Used by both the allocator (which adds entries to the queue) and the GC (which moves entries
+ // on the queue). The finalizer thread also removes entries from it.
+ PER_HEAP_FIELD_MAINTAINED_ALLOC CFinalize* finalize_queue;
-#ifndef USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC heap_segment* new_heap_segment;
-#endif //!USE_REGIONS
+#ifdef USE_REGIONS
+ // This is updated during each GC and used by the allocator path to get more regions during allocation.
+ PER_HEAP_FIELD_MAINTAINED_ALLOC region_free_list free_regions[count_free_region_kinds];
+#endif //USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_gen0_balance_delta;
+ /*******************************/
+ // PER_HEAP_FIELD_ALLOC fields //
+ /*******************************/
-#define alloc_quantum_balance_units (16)
+ // Note that for Server GC we do release this lock on the heap#0 GC thread after we are done with the GC work.
+ PER_HEAP_FIELD_ALLOC GCSpinLock more_space_lock_soh; //lock while allocating more space for soh
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_balance_threshold;
-#else //MULTIPLE_HEAPS
+ PER_HEAP_FIELD_ALLOC GCSpinLock more_space_lock_uoh;
+
+ PER_HEAP_FIELD_ALLOC size_t soh_allocation_no_gc;
+ PER_HEAP_FIELD_ALLOC size_t loh_allocation_no_gc;
+#ifdef MULTIPLE_HEAPS
+#else //MULTIPLE_HEAPS
// Used in the allocator code paths to decide if we should trigger GCs
PER_HEAP_FIELD_ALLOC uint64_t allocation_running_time;
PER_HEAP_FIELD_ALLOC size_t allocation_running_amount;
-
#endif //MULTIPLE_HEAPS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY gc_latency_level latency_level;
+ /***********************************/
+ // PER_HEAP_FIELD_INIT_ONLY fields //
+ /***********************************/
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_mechanisms settings;
+ // TODO: for regions we should be able to just get rid of these - they don't change and
+ // we can just use g_gc_lowest_address/g_gc_highest_address instead
+ PER_HEAP_FIELD_INIT_ONLY uint8_t* lowest_address;
+ PER_HEAP_FIELD_INIT_ONLY uint8_t* highest_address;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_history_global gc_data_global;
+ // Bookkeeping data structures. Even though these are per heap fields, they really point to
+ // the global ones for regions. And since we only do in-place grow, these values don't ever
+ // change after they are initialized. For segments they can get different values.
+ PER_HEAP_FIELD_INIT_ONLY uint32_t* card_table;
+ // The content is updated during GCs, by the allocator code paths and by find_object.
+ PER_HEAP_FIELD_INIT_ONLY short* brick_table;
+#ifdef CARD_BUNDLE
+ // The content is updated during GCs and by WB
+ PER_HEAP_FIELD_INIT_ONLY uint32_t* card_bundle_table;
+#endif //CARD_BUNDLE
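+
+ // Illustration (hedged): these tables are indexed by address; e.g. the brick covering an
+ // address is conceptually
+ //
+ //   size_t brick_of (uint8_t* add) { return (size_t)(add - lowest_address) / brick_size; }
+ //
+ // and the GC/allocator/WB update the entry covering the range an object or a
+ // cross-generation reference was written into.
+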
+#ifdef BACKGROUND_GC
+ // The content is updated during GCs and by the UOH allocator code paths
+ PER_HEAP_FIELD_INIT_ONLY uint32_t* mark_array;
+#endif //BACKGROUND_GC
- PER_HEAP_ISOLATED_FIELD_MAINTAINED uint64_t gc_last_ephemeral_decommit_time;
+#ifdef MULTIPLE_HEAPS
+ PER_HEAP_FIELD_INIT_ONLY GCHeap* vm_heap;
+ PER_HEAP_FIELD_INIT_ONLY int heap_number;
+#else //MULTIPLE_HEAPS
+#define vm_heap ((GCHeap*) g_theGCHeap)
+#define heap_number (0)
+#endif //MULTIPLE_HEAPS
+
+#ifdef BACKGROUND_GC
+ // We only use this when we need to time out BGC threads.
+ PER_HEAP_FIELD_INIT_ONLY CLRCriticalSection bgc_threads_timeout_cs;
-#if defined(SHORT_PLUGS) && !defined(USE_REGIONS)
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY double short_plugs_pad_ratio;
-#endif //SHORT_PLUGS && !USE_REGIONS
+ // For regions these are the same as g_gc_lowest_address/g_gc_highest_address
+ // and never change.
+ PER_HEAP_FIELD_INIT_ONLY uint8_t* background_saved_lowest_address;
+ PER_HEAP_FIELD_INIT_ONLY uint8_t* background_saved_highest_address;
- // We record the time GC work is done while EE is suspended.
- // suspended_start_ts is what we get right before we call
- // SuspendEE. We omit the time between GC end and RestartEE
- // because it's very short and by the time we are calling it
- // the settings may have changed and we'd have to do more work
- // to figure out the right GC to record info of.
- //
- // The complications are the GCs triggered without their own
- // SuspendEE, in which case we will record that GC's duration
- // as its pause duration and the rest toward the GC that
- // the SuspendEE was for. The ephemeral GC we might trigger
- // at the beginning of a BGC and the PM triggered full GCs
- // fall into this case.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t suspended_start_time;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t end_gc_time;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t total_suspended_time;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t process_start_time;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY last_recorded_gc_info last_ephemeral_gc_info;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY last_recorded_gc_info last_full_blocking_gc_info;
+ // This is used for synchronization between the bgc thread
+ // for this heap and the user threads allocating on this
+ // heap.
+ PER_HEAP_FIELD_INIT_ONLY exclusive_sync* bgc_alloc_lock;
+#endif //BACKGROUND_GC
-#ifdef BACKGROUND_GC
- // If the user didn't specify which kind of GC info to return, we need
- // to return the last recorded one. There's a complication with BGC as BGC
- // end runs concurrently. If 2 BGCs run back to back, we can't have one
- // update the info while the user thread is reading it (and we'd still like
- // to return the last BGC info otherwise if we only did BGCs we could frequently
- // return nothing). So we maintain 2 of these for BGC and the older one is
- // guaranteed to be consistent.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY last_recorded_gc_info last_bgc_info[2];
- // This is either 0 or 1.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY VOLATILE(int) last_bgc_info_index;
- // Since a BGC can finish later than blocking GCs with larger indices,
- // we can't just compare the index recorded in the GC info. We use this
- // to know whether we should be looking for a bgc info or a blocking GC,
- // if the user asks for the latest GC info of any kind.
- // This can only go from false to true concurrently so if it is true,
- // it means the bgc info is ready.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY VOLATILE(bool) is_last_recorded_bgc;
+ /***********************************/
+ // PER_HEAP_FIELD_DIAG_ONLY fields //
+ /***********************************/
- PER_HEAP_ISOLATED_METHOD void add_bgc_pause_duration_0();
+ // Only used for dprintf
+ PER_HEAP_FIELD_DIAG_ONLY uint64_t time_bgc_last;
- PER_HEAP_ISOLATED_METHOD last_recorded_gc_info* get_completed_bgc_info();
-#endif //BACKGROUND_GC
+ PER_HEAP_FIELD_DIAG_ONLY gc_history_per_heap gc_data_per_heap;
-#ifdef HOST_64BIT
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t youngest_gen_desired_th;
-#endif //HOST_64BIT
+ // TODO! This is not updated for regions and should be!
+ PER_HEAP_FIELD_DIAG_ONLY size_t maxgen_pinned_compact_before_advance;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint32_t high_memory_load_th;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint32_t m_high_memory_load_th;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint32_t v_high_memory_load_th;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool is_restricted_physical_mem;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint64_t mem_one_percent;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint64_t total_physical_mem;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint64_t entry_available_physical_mem;
-
- // Hard limit for the heap, only supported on 64-bit.
- //
- // Users can specify a hard limit for the GC heap via GCHeapHardLimit or
- // a percentage of the physical memory this process is allowed to use via
- // GCHeapHardLimitPercent. This is the maximum commit size the GC heap
- // can consume.
- //
- // The way the hard limit is decided is:
- //
- // If the GCHeapHardLimit config is specified that's the value we use;
- // else if the GCHeapHardLimitPercent config is specified we use that
- // value;
- // else if the process is running inside a container with a memory limit,
- // the hard limit is
- // max (20mb, 75% of the memory limit on the container).
- //
- // Due to the different perf charicteristics of containers we make the
- // following policy changes:
- //
- // 1) No longer affinitize Server GC threads by default because we wouldn't
- // want all the containers on the machine to only affinitize to use the
- // first few CPUs (and we don't know which CPUs are already used). You
- // can however override this by specifying the GCHeapAffinitizeMask
- // config which will decide which CPUs the process will affinitize the
- // Server GC threads to.
- //
- // 2) Segment size is determined by limit / number of heaps but has a
- // minimum value of 16mb. This can be changed by specifying the number
- // of heaps via the GCHeapCount config. The minimum size is to avoid
- // the scenario where the hard limit is small but the process can use
- // many procs and we end up with tiny segments which doesn't make sense.
- //
- // 3) LOH compaction occurs automatically if needed.
- //
- // Since we do allow both gen0 and gen3 allocations, and we don't know
- // the distinction (and it's unrealistic to request users to specify
- // this distribution) we reserve memory this way -
- //
- // For SOH we reserve (limit / number of heaps) per heap.
- // For LOH we reserve (limit * 2 / number of heaps) per heap.
- //
- // This means the following -
- //
- // + we never need to acquire new segments. This simplies the perf
- // calculations by a lot.
- //
- // + we now need a different definition of "end of seg" because we
- // need to make sure the total does not exceed the limit.
- //
- // + if we detect that we exceed the commit limit in the allocator we
- // wouldn't want to treat that as a normal commit failure because that
- // would mean we always do full compacting GCs.
- //
- // TODO: some of the logic here applies to the general case as well
- // such as LOH automatic compaction. However it will require more
- //testing to change the general case.
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t heap_hard_limit;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t heap_hard_limit_oh[total_oh_count];
-
- // Used both in a GC and on the allocator code paths when heap_hard_limit is non zero
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY CLRCriticalSection check_commit_cs;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED_ALLOC size_t current_total_committed;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED_ALLOC size_t committed_by_oh[recorded_committed_bucket_counts];
-
-#if defined (_DEBUG) && defined (MULTIPLE_HEAPS)
- PER_HEAP_FIELD_DIAG_ONLY size_t committed_by_oh_per_heap[total_oh_count];
-#endif // _DEBUG && MULTIPLE_HEAPS
-
- // This is what GC uses for its own bookkeeping.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t current_total_committed_bookkeeping;
-
- // This is if large pages should be used.
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p;
-
-#ifdef HEAP_BALANCE_INSTRUMENTATION
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t last_gc_end_time_us;
-#endif //HEAP_BALANCE_INSTRUMENTATION
-
-#ifdef USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool enable_special_regions_p;
-#else //USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_segment_size;
-
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_uoh_segment_size;
-#endif //!USE_REGIONS
-
- // For regions this is for region size.
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_segment_size_shr;
- // For SOH we always allocate segments of the same
- // size unless no_gc_region requires larger ones.
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t soh_segment_size;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t segment_info_size;
-
- // TODO: for regions we should be able to just get rid of these - they don't change and
- // we can just use g_gc_lowest_address/g_gc_highest_address instead
- PER_HEAP_FIELD_INIT_ONLY uint8_t* lowest_address;
- PER_HEAP_FIELD_INIT_ONLY uint8_t* highest_address;
+ // For dprintf in do_pre_gc
+ PER_HEAP_FIELD_DIAG_ONLY size_t allocated_since_last_gc[total_oh_count];
-#ifndef USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC BOOL ephemeral_promotion;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* saved_ephemeral_plan_start[ephemeral_generation_count];
- PER_HEAP_FIELD_SINGLE_GC size_t saved_ephemeral_plan_start_size[ephemeral_generation_count];
-#endif //!USE_REGIONS
+ PER_HEAP_FIELD_DIAG_ONLY fgm_history fgm_result;
-protected:
#ifdef MULTIPLE_HEAPS
- PER_HEAP_FIELD_INIT_ONLY GCHeap* vm_heap;
- PER_HEAP_FIELD_INIT_ONLY int heap_number;
- PER_HEAP_FIELD_SINGLE_GC_ALLOC VOLATILE(int) alloc_context_count;
+#ifdef _DEBUG
+ PER_HEAP_FIELD_DIAG_ONLY size_t committed_by_oh_per_heap[total_oh_count];
+#endif //_DEBUG
#else //MULTIPLE_HEAPS
-#define vm_heap ((GCHeap*) g_theGCHeap)
-#define heap_number (0)
#endif //MULTIPLE_HEAPS
- // Only used for dprintf
- PER_HEAP_FIELD_DIAG_ONLY uint64_t time_bgc_last;
-
-#ifdef USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint8_t* gc_low; // low end of the lowest region being condemned
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint8_t* gc_high; // high end of the highest region being condemned
-#else // USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC uint8_t* gc_low; // lowest address being condemned
- PER_HEAP_FIELD_SINGLE_GC uint8_t* gc_high; // highest address being condemned
-#endif //USE_REGIONS
-
- PER_HEAP_FIELD_SINGLE_GC size_t mark_stack_tos;
- PER_HEAP_FIELD_SINGLE_GC size_t mark_stack_bos;
-
- // These are loosely maintained, ie, could be reinitialized at any GC if needed. All that's
- // maintained is just the # of elements in mark_stack_array.
- PER_HEAP_FIELD_MAINTAINED size_t mark_stack_array_length;
- PER_HEAP_FIELD_MAINTAINED mark* mark_stack_array;
-
-#if defined (_DEBUG) && defined (VERIFY_HEAP)
- PER_HEAP_FIELD_SINGLE_GC BOOL verify_pinned_queue_p;
-#endif // _DEBUG && VERIFY_HEAP
-
- PER_HEAP_FIELD_SINGLE_GC uint8_t* oldest_pinned_plug;
-
- PER_HEAP_FIELD_SINGLE_GC size_t num_pinned_objects;
-
-#ifdef FEATURE_LOH_COMPACTION
- PER_HEAP_FIELD_SINGLE_GC size_t loh_pinned_queue_tos;
- PER_HEAP_FIELD_SINGLE_GC size_t loh_pinned_queue_bos;
-
- // These are loosely maintained, ie, could be reinitialized at any GC if needed. All that's
- // maintained is the # of elements in loh_pinned_queue, and when we should delete loh_pinned_queue
- // if LOH compaction hasn't happened for a while
- PER_HEAP_FIELD_MAINTAINED size_t loh_pinned_queue_length;
- PER_HEAP_FIELD_MAINTAINED int loh_pinned_queue_decay;
- PER_HEAP_FIELD_MAINTAINED mark* loh_pinned_queue;
-
- // This is for forced LOH compaction via the complus env var
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY BOOL loh_compaction_always_p;
-
- // This is set by the user in SetLOHCompactionMode and modified during a GC.
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_loh_compaction_mode loh_compaction_mode;
-
- // We may not compact LOH on every heap if we can't
- // grow the pinned queue. This is to indicate whether
- // this heap's LOH is compacted or not. So even if
- // settings.loh_compaction is TRUE this may not be TRUE.
- PER_HEAP_FIELD_SINGLE_GC BOOL loh_compacted_p;
-#endif //FEATURE_LOH_COMPACTION
-
#ifdef BACKGROUND_GC
- // Loosely maintained. Can change if the BGC thread times out and re-created.
- PER_HEAP_FIELD_MAINTAINED EEThreadId bgc_thread_id;
-
-#ifdef WRITE_WATCH
- PER_HEAP_FIELD_SINGLE_GC uint8_t* background_written_addresses [array_size+2];
-#endif //WRITE_WATCH
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(c_gc_state) current_c_gc_state;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_mechanisms saved_bgc_settings;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_history_global bgc_data_global;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(BOOL) gc_background_running;
-
- PER_HEAP_FIELD_SINGLE_GC gc_history_per_heap bgc_data_per_heap;
-
- // Maintained and remains TRUE as long as the BGC thread doesn't timeout
- PER_HEAP_FIELD_MAINTAINED BOOL bgc_thread_running; // gc thread is its main loop
-
- // Only matters if we need to timeout BGC threads
- PER_HEAP_ISOLATED_FIELD_MAINTAINED BOOL keep_bgc_threads_p;
-
- // This event is used by BGC threads to do something on
- // one specific thread while other BGC threads have to
- // wait. This is different from a join 'cause you can't
- // specify which thread should be doing some task
- // while other threads have to wait.
- // For example, to make the BGC threads managed threads
- // we need to create them on the thread that called
- // SuspendEE which is heap 0.
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent bgc_threads_sync_event;
-
- // Maintained and remains the same as long as the BGC thread doesn't timeout
- PER_HEAP_FIELD_MAINTAINED Thread* bgc_thread;
-
- // We only use this when we need to timeout BGC threads.
- PER_HEAP_FIELD_INIT_ONLY CLRCriticalSection bgc_threads_timeout_cs;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent background_gc_done_event;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent ee_proceed_event;
-
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool gc_can_use_concurrent;
-
- // Only changed by API
- PER_HEAP_ISOLATED_FIELD bool temp_disable_concurrent_p;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL do_ephemeral_gc_p;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL do_concurrent_p;
-
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(bgc_state) current_bgc_state;
+ PER_HEAP_FIELD_DIAG_ONLY gc_history_per_heap bgc_data_per_heap;
struct gc_history
{
};
#define max_history_count 64
-
PER_HEAP_FIELD_DIAG_ONLY int gchist_index_per_heap;
PER_HEAP_FIELD_DIAG_ONLY gc_history gchist_per_heap[max_history_count];
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int gchist_index;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY gc_mechanisms_store gchist[max_history_count];
PER_HEAP_FIELD_DIAG_ONLY size_t bgc_overflow_count;
- PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_loh_size;
- PER_HEAP_FIELD_SINGLE_GC size_t bgc_begin_poh_size;
- PER_HEAP_FIELD_SINGLE_GC size_t end_loh_size;
- PER_HEAP_FIELD_SINGLE_GC size_t end_poh_size;
-
-#ifdef BGC_SERVO_TUNING
- PER_HEAP_FIELD_SINGLE_GC uint64_t loh_a_no_bgc;
- PER_HEAP_FIELD_SINGLE_GC uint64_t loh_a_bgc_marking;
- PER_HEAP_FIELD_SINGLE_GC uint64_t loh_a_bgc_planning;
-
- // Total allocated last BGC's plan + between last and this bgc +
- // this bgc's mark
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint64_t total_loh_a_last_bgc;
-
- PER_HEAP_FIELD_SINGLE_GC size_t bgc_maxgen_end_fl_size;
-#endif //BGC_SERVO_TUNING
-
- // This includes what we allocate at the end of segment - allocating
- // in free list doesn't increase the heap size.
- PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_loh_size_increased;
- PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t bgc_poh_size_increased;
-
// Used in ApproxTotalBytesInUse
PER_HEAP_FIELD_DIAG_ONLY size_t background_soh_size_end_mark;
+#endif //BACKGROUND_GC
- // Updated by the allocator and reinit-ed in each BGC
- PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_soh_alloc_count;
- PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t background_uoh_alloc_count;
-
- PER_HEAP_FIELD_SINGLE_GC_ALLOC VOLATILE(int32_t) uoh_alloc_thread_count;
+#ifdef USE_REGIONS
+ // Used in a single GC.
+ PER_HEAP_FIELD_DIAG_ONLY int regions_per_gen[max_generation + 1];
- // These are loosely maintained, same deal as mark_stack_array, the length is maintained
- // but can be reinit-ed if needed. But otherwise they are used in a single GC.
- // TODO: note that we do reinit background_mark_stack_tos in grow_bgc_mark_stack but there's really
- // no need because we always initialize it on entry of background_mark_simple1
- PER_HEAP_FIELD_MAINTAINED uint8_t** background_mark_stack_tos;
- PER_HEAP_FIELD_MAINTAINED uint8_t** background_mark_stack_array;
- PER_HEAP_FIELD_MAINTAINED size_t background_mark_stack_array_length;
+#ifdef STRESS_REGIONS
+ // TODO: could consider dynamically growing this.
+ // Right now the way it works -
+ // For each gen0 region, pin an object somewhere near the beginning and middle.
+ // When we run out, we start replacing objects pinned by the earlier handles.
+#define PINNING_HANDLE_INITIAL_LENGTH 128
+ PER_HEAP_FIELD_DIAG_ONLY OBJECTHANDLE* pinning_handles_for_alloc;
+ PER_HEAP_FIELD_DIAG_ONLY int ph_index_per_heap;
+ PER_HEAP_FIELD_DIAG_ONLY int pinning_seg_interval;
+ PER_HEAP_FIELD_DIAG_ONLY size_t num_gen0_regions;
+ PER_HEAP_FIELD_DIAG_ONLY int sip_seg_interval;
+ PER_HEAP_FIELD_DIAG_ONLY int sip_seg_maxgen_interval;
+ PER_HEAP_FIELD_DIAG_ONLY size_t num_condemned_regions;
+#endif //STRESS_REGIONS
+#endif //USE_REGIONS
- // We can't process the ephemeral range concurrently so we
- // wait till final mark to process it.
- PER_HEAP_FIELD_SINGLE_GC BOOL processed_eph_overflow_p;
+#ifdef FEATURE_EVENT_TRACE
+#define max_etw_item_count 2000
-#ifdef USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC BOOL background_overflow_p;
-#else
- PER_HEAP_FIELD_SINGLE_GC uint8_t* background_min_overflow_address;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* background_max_overflow_address;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* background_min_soh_overflow_address;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* background_max_soh_overflow_address;
- PER_HEAP_FIELD_SINGLE_GC heap_segment* saved_overflow_ephemeral_seg;
- PER_HEAP_FIELD_SINGLE_GC heap_segment* saved_sweep_ephemeral_seg;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* saved_sweep_ephemeral_start;
-#endif //!USE_REGIONS
-
- // For regions these are the same as g_gc_lowest_address/g_gc_highest_address
- // and never change.
- PER_HEAP_FIELD_INIT_ONLY uint8_t* background_saved_lowest_address;
- PER_HEAP_FIELD_INIT_ONLY uint8_t* background_saved_highest_address;
-
- // This is used for synchronization between the bgc thread
- // for this heap and the user threads allocating on this
- // heap.
- PER_HEAP_FIELD_INIT_ONLY exclusive_sync* bgc_alloc_lock;
-
-#ifdef SNOOP_STATS
- PER_HEAP_FIELD_SINGLE_GC snoop_stats_data snoop_stat;
-#endif //SNOOP_STATS
-
- // Loosedly maintained, can be reinit-ed in background_grow_c_mark_list.
- PER_HEAP_FIELD_MAINTAINED uint8_t** c_mark_list;
- PER_HEAP_FIELD_MAINTAINED size_t c_mark_list_length;
-
- // This is marked as SINGLE_GC, as in, it's initialized in each BGC and used throughout that BGC.
- // But the ephemeral GCs that happen during this BGC so in that sense it can be used in multiple GCs.
- PER_HEAP_FIELD_SINGLE_GC size_t c_mark_list_index;
-#endif //BACKGROUND_GC
-
- PER_HEAP_FIELD_SINGLE_GC uint8_t** mark_list;
-
- // Loosely maintained,can be reinit-ed in grow_mark_list.
- PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t mark_list_size;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC bool mark_list_overflow;
-
- PER_HEAP_FIELD_SINGLE_GC uint8_t** mark_list_end;
- PER_HEAP_FIELD_SINGLE_GC uint8_t** mark_list_index;
-
- // Loosely maintained,can be reinit-ed in grow_mark_list.
- PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t** g_mark_list;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t** g_mark_list_copy;
-
- PER_HEAP_FIELD_SINGLE_GC uint8_t*** mark_list_piece_start;
- PER_HEAP_FIELD_SINGLE_GC uint8_t*** mark_list_piece_end;
-#ifdef USE_REGIONS
- // REGIONS TODO: these are allocated separately but should really be part
- // of GC's book keeping datastructures.
- // Loosely maintained, can be reinit-ed in grow_mark_list_piece
- PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t g_mark_list_piece_size;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t*** g_mark_list_piece;
-#endif //USE_REGIONS
-
- PER_HEAP_FIELD_SINGLE_GC uint8_t* min_overflow_address;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* max_overflow_address;
-
-#ifndef MULTIPLE_HEAPS
- PER_HEAP_FIELD_SINGLE_GC uint8_t* shigh; //keeps track of the highest marked object
- PER_HEAP_FIELD_SINGLE_GC uint8_t* slow; //keeps track of the lowest marked object
-#endif //MULTIPLE_HEAPS
+ enum etw_bucket_kind
+ {
+ largest_fl_items = 0,
+ plugs_in_condemned = 1
+ };
- // calculated at the end of a GC and used in allocator
- PER_HEAP_FIELD_SINGLE_GC_ALLOC size_t allocation_quantum;
+ // This is for gen2 FL purposes so it uses sizes for gen2 buckets.
+ // This event is only to give us a rough idea of the largest gen2 fl
+ // items or plugs that we had to allocate in condemned. We only fire
+ // these events at verbose level and stop at max_etw_item_count items.
+ PER_HEAP_FIELD_DIAG_ONLY etw_bucket_info bucket_info[NUM_GEN2_ALIST];
+#endif //FEATURE_EVENT_TRACE
- PER_HEAP_FIELD_SINGLE_GC size_t alloc_contexts_used;
+#ifdef SPINLOCK_HISTORY
+#define max_saved_spinlock_info 48
+ PER_HEAP_FIELD_DIAG_ONLY int spinlock_info_index;
+ PER_HEAP_FIELD_DIAG_ONLY spinlock_info last_spinlock_info[max_saved_spinlock_info + 8];
+#endif //SPINLOCK_HISTORY
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC no_gc_region_info current_no_gc_region_info;
+#ifdef RECORD_LOH_STATE
+#define max_saved_loh_states 12
+ struct loh_state_info
+ {
+ allocation_state alloc_state;
+ EEThreadId thread_id;
+ };
+ PER_HEAP_FIELD_DIAG_ONLY int loh_state_index;
+ PER_HEAP_FIELD_DIAG_ONLY loh_state_info last_loh_states[max_saved_loh_states];
+#endif //RECORD_LOH_STATE
- PER_HEAP_FIELD_ALLOC size_t soh_allocation_no_gc;
- PER_HEAP_FIELD_ALLOC size_t loh_allocation_no_gc;
+ // This field is used by the !sos.AnalyzeOOM command and is updated with the last OOM the GC sees.
+ PER_HEAP_FIELD_DIAG_ONLY oom_history oom_info;
- PER_HEAP_FIELD_SINGLE_GC bool no_gc_oom_p;
- PER_HEAP_FIELD_SINGLE_GC heap_segment* saved_loh_segment_no_gc;
+#define max_oom_history_count 4
+ PER_HEAP_FIELD_DIAG_ONLY int oomhist_index_per_heap;
+ PER_HEAP_FIELD_DIAG_ONLY oom_history oomhist_per_heap[max_oom_history_count];
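+
+ // Illustration (hedged sketch): the *hist_index_per_heap fields implement small circular
+ // histories, roughly:
+ //
+ //   oomhist_per_heap[oomhist_index_per_heap] = oom_info;
+ //   oomhist_index_per_heap = (oomhist_index_per_heap + 1) % max_oom_history_count;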
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL proceed_with_gc_p;
+ PER_HEAP_FIELD_DIAG_ONLY size_t interesting_data_per_gc[max_idp_count];
+ // The following 4 fields are used by the !sos.DumpGCData command.
+ PER_HEAP_FIELD_DIAG_ONLY size_t interesting_data_per_heap[max_idp_count];
+ PER_HEAP_FIELD_DIAG_ONLY size_t compact_reasons_per_heap[max_compact_reasons_count];
+ PER_HEAP_FIELD_DIAG_ONLY size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
+ PER_HEAP_FIELD_DIAG_ONLY size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
-#ifdef MULTIPLE_HEAPS
- // Also updated on the heap#0 GC thread because that's where we are actually doing the decommit.
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL gradual_decommit_in_progress_p;
+ // The following 3 fields are used in a single GC when we detect that the DAC notification has been
+ // enabled to find roots for the !sos.FindRoots command.
+ PER_HEAP_FIELD_DIAG_ONLY uint8_t** internal_root_array;
+ PER_HEAP_FIELD_DIAG_ONLY size_t internal_root_array_index;
+ PER_HEAP_FIELD_DIAG_ONLY BOOL heap_analyze_success;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t max_decommit_step_size;
-#endif //MULTIPLE_HEAPS
+#ifdef HEAP_ANALYZE
+ PER_HEAP_FIELD_DIAG_ONLY size_t internal_root_array_length;
-#define youngest_generation (generation_of (0))
-#define large_object_generation (generation_of (loh_generation))
-#define pinned_object_generation (generation_of (poh_generation))
+ // The next two fields are used to optimize the search for the object
+ // enclosing the current reference handled by ha_mark_object_simple.
+ PER_HEAP_FIELD_DIAG_ONLY uint8_t* current_obj;
+ PER_HEAP_FIELD_DIAG_ONLY size_t current_obj_size;
+#endif //HEAP_ANALYZE
- // The more_space_lock and gc_lock is used for 3 purposes:
- //
- // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock_soh)
- // 2) to synchronize allocations of large objects (more_space_lock_uoh)
- // 3) to synchronize the GC itself (gc_lock)
- //
- PER_HEAP_ISOLATED_FIELD_MAINTAINED GCSpinLock gc_lock; //lock while doing GC
+ PER_HEAP_FIELD_DIAG_ONLY gen_to_condemn_tuning gen_to_condemn_reasons;
+ PER_HEAP_FIELD_DIAG_ONLY size_t etw_allocation_running_amount[total_oh_count];
+ PER_HEAP_FIELD_DIAG_ONLY uint64_t total_alloc_bytes_soh;
+ PER_HEAP_FIELD_DIAG_ONLY uint64_t total_alloc_bytes_uoh;
- // Note that for Server GC we do release this lock on the heap#0 GC thread after we are done with the GC work.
- PER_HEAP_FIELD_ALLOC GCSpinLock more_space_lock_soh; //lock while allocating more space for soh
+ // Used in a single GC.
+ PER_HEAP_FIELD_DIAG_ONLY size_t num_pinned_objects;
- PER_HEAP_FIELD_ALLOC GCSpinLock more_space_lock_uoh;
+#if defined(_DEBUG) && defined(VERIFY_HEAP)
+ // Used in a single GC.
+ PER_HEAP_FIELD_DIAG_ONLY BOOL verify_pinned_queue_p;
+#endif //_DEBUG && VERIFY_HEAP
#ifdef SYNCHRONIZATION_STATS
PER_HEAP_FIELD_DIAG_ONLY unsigned int good_suspension;
PER_HEAP_FIELD_DIAG_ONLY unsigned int num_msl_acquired;
// Total cycles it takes to acquire the more_space_lock.
PER_HEAP_FIELD_DIAG_ONLY uint64_t total_msl_acquire;
-
- PER_HEAP_METHOD void init_heap_sync_stats()
- {
- good_suspension = 0;
- bad_suspension = 0;
- num_msl_acquired = 0;
- total_msl_acquire = 0;
- num_high_msl_acquire = 0;
- num_low_msl_acquire = 0;
- more_space_lock.init();
- gc_lock.init();
- }
-
- PER_HEAP_METHOD void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log)
- {
- printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n",
- heap_num,
- alloc_contexts_used,
- good_suspension,
- bad_suspension,
- (unsigned int)(total_msl_acquire / gc_count_during_log),
- num_high_msl_acquire / gc_count_during_log,
- num_low_msl_acquire / gc_count_during_log,
- num_msl_acquired / gc_count_during_log,
- more_space_lock.num_switch_thread / gc_count_during_log,
- more_space_lock.num_wait_longer / gc_count_during_log,
- more_space_lock.num_switch_thread_w / gc_count_during_log,
- more_space_lock.num_disable_preemptive_w / gc_count_during_log);
- }
-
#endif //SYNCHRONIZATION_STATS
- // These *alloc_list fields are init-ed once and used throughput process lifetime, they contained fields
- // that are maintained via these generations' free_list_allocator
-#define NUM_LOH_ALIST (7)
- // bucket 0 contains sizes less than 64*1024
- // the "BITS" number here is the highest bit in 64*1024 - 1, zero-based as in BitScanReverse.
- // see first_suitable_bucket(size_t size) for details.
-#define BASE_LOH_ALIST_BITS (15)
- PER_HEAP_FIELD_MAINTAINED_ALLOC alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
+ /*****************************************************************************************************************/
+ // global fields //
+ /*****************************************************************************************************************/
-#define NUM_GEN2_ALIST (12)
-#ifdef HOST_64BIT
- // bucket 0 contains sizes less than 256
-#define BASE_GEN2_ALIST_BITS (7)
-#else
- // bucket 0 contains sizes less than 128
-#define BASE_GEN2_ALIST_BITS (6)
-#endif // HOST_64BIT
- PER_HEAP_FIELD_MAINTAINED alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
+ /********************************************/
+ // PER_HEAP_ISOLATED_FIELD_SINGLE_GC fields //
+ /********************************************/
-#define NUM_POH_ALIST (19)
- // bucket 0 contains sizes less than 256
-#define BASE_POH_ALIST_BITS (7)
- PER_HEAP_FIELD_MAINTAINED_ALLOC alloc_list poh_alloc_list[NUM_POH_ALIST-1];
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC bool mark_list_overflow;
-#ifdef DOUBLY_LINKED_FL
- // For bucket 0 added list, we don't want to have to go through
- // it to count how many bytes it has so we keep a record here.
- // If we need to sweep in gen1, we discard this added list and
- // need to deduct the size from free_list_space.
- // Note that we should really move this and the free_list_space
- // accounting into the alloc_list class.
- PER_HEAP_FIELD_SINGLE_GC size_t gen2_removed_no_undo;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL proceed_with_gc_p;
-#define INVALID_SAVED_PINNED_PLUG_INDEX ((size_t)~0)
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC bool maxgen_size_inc_p;
- PER_HEAP_FIELD_SINGLE_GC size_t saved_pinned_plug_index;
-#endif //DOUBLY_LINKED_FL
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL g_low_memory_status;
-#ifdef FEATURE_EVENT_TRACE
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC bool informational_event_enabled_p;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(bool) internal_gc_done;
- // Time is all in microseconds here. These are times recorded during STW.
- //
- // Note that the goal of this is not to show every single type of roots
- // For that you have the per heap MarkWithType events. This takes advantage
- // of the joins we already have and naturally gets the time between each
- // join.
- enum etw_gc_time_info
- {
- time_mark_sizedref = 0,
- // Note time_mark_roots does not include scanning sizedref handles.
- time_mark_roots = 1,
- time_mark_short_weak = 2,
- time_mark_scan_finalization = 3,
- time_mark_long_weak = 4,
- max_bgc_time_type = 5,
- time_plan = 5,
- time_relocate = 6,
- time_sweep = 6,
- max_sweep_time_type = 7,
- time_compact = 7,
- max_compact_time_type = 8
- };
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_mechanisms settings;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t* gc_time_info;
+#ifdef MULTIPLE_HEAPS
+ // These 2 fields' values do not change but are set/unset per GC
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent gc_start_event;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent ee_suspend_event;
+
+ // Also updated on the heap#0 GC thread because that's where we are actually doing the decommit.
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL gradual_decommit_in_progress_p;
+#ifdef MH_SC_MARK
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC int* g_mark_stack_busy;
+#endif //MH_SC_MARK
+#if !defined(USE_REGIONS) || defined(_DEBUG)
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t* g_promoted;
+#endif //!USE_REGIONS || _DEBUG
#ifdef BACKGROUND_GC
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t* bgc_time_info;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t* g_bpromoted;
#endif //BACKGROUND_GC
+#else //MULTIPLE_HEAPS
+#if !defined(USE_REGIONS) || defined(_DEBUG)
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t g_promoted;
+#endif //!USE_REGIONS || _DEBUG
+#ifdef BACKGROUND_GC
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t g_bpromoted;
+#endif //BACKGROUND_GC
+#endif //MULTIPLE_HEAPS
- PER_HEAP_ISOLATED_METHOD void record_mark_time (uint64_t& mark_time,
- uint64_t& current_mark_time,
- uint64_t& last_mark_time);
-
-#define max_etw_item_count 2000
-
- enum etw_bucket_kind
- {
- largest_fl_items = 0,
- plugs_in_condemned = 1
- };
+#ifdef BACKGROUND_GC
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(c_gc_state) current_c_gc_state;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_mechanisms saved_bgc_settings;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(BOOL) gc_background_running;
- // This is for gen2 FL purpose so it would use sizes for gen2 buckets.
- // This event is only to give us a rough idea of the largest gen2 fl
- // items or plugs that we had to allocate in condemned. We only fire
- // these events on verbose level and stop at max_etw_item_count items.
- PER_HEAP_FIELD_DIAG_ONLY etw_bucket_info bucket_info[NUM_GEN2_ALIST];
-
- PER_HEAP_METHOD void init_bucket_info();
-
- PER_HEAP_METHOD void add_plug_in_condemned_info (generation* gen, size_t plug_size);
+ // This event is used by BGC threads to do something on
+ // one specific thread while other BGC threads have to
+ // wait. This is different from a join 'cause you can't
+ // specify which thread should be doing some task
+ // while other threads have to wait.
+ // For example, to make the BGC threads managed threads
+ // we need to create them on the thread that called
+ // SuspendEE which is heap 0.
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent bgc_threads_sync_event;
- PER_HEAP_METHOD void fire_etw_allocation_event (size_t allocation_amount,
- int gen_number,
- uint8_t* object_address,
- size_t object_size);
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent background_gc_done_event;
- PER_HEAP_METHOD void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject);
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent ee_proceed_event;
- // config stuff and only init-ed once at the beginning.
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t physical_memory_from_config;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t gen0_min_budget_from_config;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t gen0_max_budget_from_config;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int high_mem_percent_from_config;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool use_frozen_segments_p;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool hard_limit_config_p;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL do_ephemeral_gc_p;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL do_concurrent_p;
-#ifdef FEATURE_LOH_COMPACTION
- // This records the LOH compaction info -
- // time it takes to plan, relocate and compact.
- // We want to see how reference rich large objects are so
- // we also record ref info. Survived bytes are already recorded
- // in gc_generation_data of the perheap history event.
- //
- // If we don't end up actually doing LOH compaction because plan
- // failed, the time would all be 0s.
- struct etw_loh_compact_info
- {
- uint32_t time_plan;
- uint32_t time_compact;
- uint32_t time_relocate;
- size_t total_refs;
- size_t zero_refs;
- };
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint32_t cm_in_progress;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY etw_loh_compact_info* loh_compact_info;
+ // normally this is FALSE; we set it to TRUE at the end of the gen1 GC
+ // we do right before the bgc starts.
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL dont_restart_ee_p;
- PER_HEAP_METHOD void loh_reloc_survivor_helper (uint8_t** pval,
- size_t& total_refs,
- size_t& zero_refs);
-#endif //FEATURE_LOH_COMPACTION
-#endif //FEATURE_EVENT_TRACE
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC GCEvent bgc_start_event;
- // TODO: actually a couple of entries in these elements are carried over from GC to GC -
- // collect_count and previous_time_clock. It'd be nice to isolate these out.
- // Only field used by allocation is new_allocation.
- PER_HEAP_FIELD_SINGLE_GC_ALLOC dynamic_data dynamic_data_table[total_generation_count];
+#ifdef BGC_SERVO_TUNING
+ // Total allocated last BGC's plan + between last and this bgc +
+ // this bgc's mark
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint64_t total_loh_a_last_bgc;
+#endif //BGC_SERVO_TUNING
+#endif //BACKGROUND_GC
- PER_HEAP_FIELD_DIAG_ONLY gc_history_per_heap gc_data_per_heap;
+#ifdef USE_REGIONS
+ // Initialized in a blocking GC at the beginning of the mark phase
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t region_count;
- PER_HEAP_FIELD_SINGLE_GC size_t total_promoted_bytes;
+ // Highest and lowest address for ephemeral generations.
+ // For regions these are only used during a GC (init-ed at beginning of mark and
+ // used later in that GC).
+ // They could be used for WB but we currently don't use them for that purpose, even
+ // though we do pass them to the WB code.
+ //
+ // For segments these are per heap fields.
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(uint8_t*) ephemeral_low;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(uint8_t*) ephemeral_high;
- PER_HEAP_FIELD_SINGLE_GC size_t finalization_promoted_bytes;
+ // For segments these are per heap fields
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint8_t* gc_low; // low end of the lowest region being condemned
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint8_t* gc_high; // high end of the highest region being condemned
+#endif //USE_REGIONS
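+
+ // Illustration of how a write barrier could consume such bounds (hedged sketch; the real
+ // barrier lives outside this class and SetCardForAddress is a hypothetical helper):
+ //
+ //   void write_barrier (Object** dst, Object* ref)
+ //   {
+ //       *dst = ref;
+ //       if (((uint8_t*)ref >= g_ephemeral_low) && ((uint8_t*)ref < g_ephemeral_high))
+ //           SetCardForAddress (dst);
+ //   }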
- // TODO! This is not updated for regions and should be!
- PER_HEAP_FIELD_DIAG_ONLY size_t maxgen_pinned_compact_before_advance;
+ /**************************************************/
+ // PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC fields //
+ /**************************************************/
- // dynamic tuning.
- PER_HEAP_METHOD BOOL dt_low_ephemeral_space_p (gc_tuning_point tp);
- // if elevate_p is FALSE, it means we are determining fragmentation for a generation
- // to see if we should condemn this gen; otherwise it means we are determining if
- // we should elevate to doing max_gen from an ephemeral gen.
- PER_HEAP_METHOD BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
- PER_HEAP_METHOD BOOL dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
- PER_HEAP_METHOD BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
- PER_HEAP_METHOD BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp);
+ // The following 2 events are there to support the gen2 GC notification which is only fired if a full blocking GC
+ // is about to happen
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC GCEvent full_gc_approach_event;
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC GCEvent full_gc_end_event;
- // This one is unusual, it's calculated in one GC and used in the next GC. so it's maintained
- // but only maintained till the next GC.
- PER_HEAP_FIELD_MAINTAINED int generation_skip_ratio;//in %
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC_ALLOC no_gc_region_info current_no_gc_region_info;
-#ifdef FEATURE_CARD_MARKING_STEALING
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_eph_soh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_gen_soh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_eph_loh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(size_t) n_gen_loh;
-#endif //FEATURE_CARD_MARKING_STEALING
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC uint64_t entry_available_physical_mem;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY int generation_skip_ratio_threshold;
+#ifdef FEATURE_LOH_COMPACTION
+ // This is set by the user in SetLOHCompactionMode and modified during a GC.
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC gc_loh_compaction_mode loh_compaction_mode;
+#endif //FEATURE_LOH_COMPACTION
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY int conserve_mem_setting;
+ /*********************************************/
+ // PER_HEAP_ISOLATED_FIELD_MAINTAINED fields //
+ /*********************************************/
- PER_HEAP_FIELD_MAINTAINED_ALLOC BOOL gen0_bricks_cleared;
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED GCSpinLock gc_lock; //lock while doing GC
- // This is also changed by find_object
- PER_HEAP_FIELD_MAINTAINED int gen0_must_clear_bricks;
+ // Loosely maintained, can be reinit-ed in grow_mark_list.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t mark_list_size;
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC bool maxgen_size_inc_p;
+ // Loosely maintained, can be reinit-ed in grow_mark_list.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t** g_mark_list;
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t** g_mark_list_copy;
// The elements of this array are updated as each type of GC happens.
PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t full_gc_counts[gc_type_max];
- // the # of bytes allocates since the last full compacting GC, maintained by the allocator and
- // reset during the next full compacting GC
- PER_HEAP_FIELD_SINGLE_GC_ALLOC uint64_t loh_alloc_since_cg;
-
- PER_HEAP_FIELD_SINGLE_GC BOOL elevation_requested;
-
- // if this is TRUE, we should always guarantee that we do a
- // full compacting GC before we OOM.
- // set by the allocator/GC and cleared during a full blocking GC
- PER_HEAP_FIELD_SINGLE_GC_ALLOC BOOL last_gc_before_oom;
-
-#ifndef USE_REGIONS
- // Set in one GC and updated in the next GC.
- PER_HEAP_ISOLATED_FIELD_MAINTAINED BOOL should_expand_in_full_gc;
-#endif //!USE_REGIONS
-
- // When we decide if we should expand the heap or not, we are
- // fine NOT to expand if we find enough free space in gen0's free
- // list or end of seg and we check this in decide_on_compacting.
- // This is an expensive check so we just record the fact and not
- // need to check in the allocator again.
- //
- // Set during a GC and checked by allocator after that GC
- PER_HEAP_FIELD_SINGLE_GC BOOL sufficient_gen0_space_p;
-
-#ifdef MULTIPLE_HEAPS
- // Init-ed during a GC and updated by allocator after that GC
- PER_HEAP_FIELD_SINGLE_GC_ALLOC bool gen0_allocated_after_gc_p;
-#endif //MULTIPLE_HEAPS
-
// A provisional mode means we could change our mind in the middle of a GC
// and want to do a different GC instead.
//
// full GC instead (without restarting EE).
PER_HEAP_ISOLATED_FIELD_MAINTAINED bool provisional_mode_triggered;
- // It's maintained but only till the very next GC. When this is set in a GC, it will be cleared
- // in the very next GC done with garbage_collect_pm_full_gc.
- PER_HEAP_ISOLATED_FIELD_MAINTAINED bool pm_trigger_full_gc;
+ // It's maintained but only till the very next GC. When this is set in a GC, it will be cleared
+ // in the very next GC done with garbage_collect_pm_full_gc.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED bool pm_trigger_full_gc;
+
+ // This is smoothed "desired_per_heap", ie, smoothed budget. Only used in a join.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t smoothed_desired_per_heap[total_generation_count];
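+
+ // Hedged sketch of the smoothing - an exponential moving average over the per-GC desired
+ // budget (the exact smoothing factor is an assumption here):
+ //
+ //   smoothed = (sample / smoothing) + (smoothed / smoothing) * (smoothing - 1);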
+
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED uint64_t gc_last_ephemeral_decommit_time;
+
+ // Maintained as we need to grow the bookkeeping data.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t card_table_element_layout[total_bookkeeping_elements + 1];
+
+#ifdef BACKGROUND_GC
+ // Only matters if we need to time out BGC threads
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED BOOL keep_bgc_threads_p;
+#endif //BACKGROUND_GC
+
+#ifdef USE_REGIONS
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t* bookkeeping_covered_committed;
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t bookkeeping_sizes[total_bookkeeping_elements];
+
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED region_info* map_region_to_generation;
+ // Same table as above, but skewed so that we can index
+ // directly with address >> min_segment_size_shr.
+ // This is passed to the write barrier code.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED region_info* map_region_to_generation_skewed;
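+
+ // Illustration (hedged; the exact base is an assumption): if the unskewed table is indexed
+ // by region number relative to some base address, skewing pre-subtracts that base so the
+ // barrier can index with a plain shift:
+ //
+ //   map_region_to_generation_skewed = map_region_to_generation - ((size_t)base >> min_segment_size_shr);
+ //   region_info ri = map_region_to_generation_skewed[(size_t)addr >> min_segment_size_shr];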
+
+ // REGIONS TODO: these are allocated separately but should really be part
+ // of the GC's bookkeeping data structures.
+ // Loosely maintained, can be reinit-ed in grow_mark_list_piece
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t g_mark_list_piece_size;
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t*** g_mark_list_piece;
+
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED region_free_list global_regions_to_decommit[count_free_region_kinds];
+
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED region_free_list global_free_huge_regions;
+#else //USE_REGIONS
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED heap_segment* segment_standby_list;
+
+ // Set in one GC and updated in the next GC.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED BOOL should_expand_in_full_gc;
+#endif //USE_REGIONS
+
+ /****************************************************/
+ // PER_HEAP_ISOLATED_FIELD_MAINTAINED_ALLOC fields //
+ /****************************************************/
+
+ // See comments for heap_hard_limit.
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED_ALLOC size_t current_total_committed;
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED_ALLOC size_t committed_by_oh[recorded_committed_bucket_counts];
+
+ /********************************************/
+ // PER_HEAP_ISOLATED_FIELD_INIT_ONLY fields //
+ /********************************************/
+
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY gc_latency_level latency_level;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint32_t high_memory_load_th;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint32_t m_high_memory_load_th;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint32_t v_high_memory_load_th;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool is_restricted_physical_mem;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint64_t mem_one_percent;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint64_t total_physical_mem;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY int generation_skip_ratio_threshold;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY int conserve_mem_setting;
+
+ // For SOH we always allocate segments of the same
+ // size unless no_gc_region requires larger ones.
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t soh_segment_size;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t segment_info_size;
+
+ // Hard limit for the heap, only supported on 64-bit.
+ //
+ // Users can specify a hard limit for the GC heap via GCHeapHardLimit or
+ // a percentage of the physical memory this process is allowed to use via
+ // GCHeapHardLimitPercent. This is the maximum commit size the GC heap
+ // can consume.
+ //
+ // The way the hard limit is decided is:
+ //
+ // If the GCHeapHardLimit config is specified that's the value we use;
+ // else if the GCHeapHardLimitPercent config is specified we use that
+ // value;
+ // else if the process is running inside a container with a memory limit,
+ // the hard limit is
+ // max (20mb, 75% of the memory limit on the container).
+ //
+ // Due to the different perf characteristics of containers we make the
+ // following policy changes:
+ //
+ // 1) No longer affinitize Server GC threads by default because we wouldn't
+ // want all the containers on the machine to only affinitize to use the
+ // first few CPUs (and we don't know which CPUs are already used). You
+ // can however override this by specifying the GCHeapAffinitizeMask
+ // config which will decide which CPUs the process will affinitize the
+ // Server GC threads to.
+ //
+ // 2) Segment size is determined by limit / number of heaps but has a
+ // minimum value of 16mb. This can be changed by specifying the number
+ // of heaps via the GCHeapCount config. The minimum size is to avoid
+ // the scenario where the hard limit is small but the process can use
+ // many procs and we end up with tiny segments which doesn't make sense.
+ //
+ // 3) LOH compaction occurs automatically if needed.
+ //
+ // Since we do allow both gen0 and gen3 allocations, and we don't know
+ // the distinction (and it's unrealistic to request users to specify
+ // this distribution) we reserve memory this way -
+ //
+ // For SOH we reserve (limit / number of heaps) per heap.
+ // For LOH we reserve (limit * 2 / number of heaps) per heap.
+ //
+ // This means the following -
+ //
+ // + we never need to acquire new segments. This simplifies the perf
+ // calculations by a lot.
+ //
+ // + we now need a different definition of "end of seg" because we
+ // need to make sure the total does not exceed the limit.
+ //
+ // + if we detect that we exceed the commit limit in the allocator we
+ // wouldn't want to treat that as a normal commit failure because that
+ // would mean we always do full compacting GCs.
+ //
+ // TODO: some of the logic here applies to the general case as well
+ // such as LOH automatic compaction. However it will require more
+ // testing to change the general case.
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t heap_hard_limit;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t heap_hard_limit_oh[total_oh_count];
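+
+ // Hedged sketch of the decision order described above (the actual logic lives in the GC
+ // initialization code; the names below are hypothetical):
+ //
+ //   size_t decide_hard_limit (size_t cfg_limit, uint32_t cfg_percent,
+ //                             bool in_container, uint64_t container_limit)
+ //   {
+ //       if (cfg_limit != 0) return cfg_limit;
+ //       if (cfg_percent != 0) return (size_t)(total_physical_mem * cfg_percent / 100);
+ //       if (in_container) return max ((size_t)(20 * 1024 * 1024), (size_t)(container_limit * 3 / 4));
+ //       return 0; // no hard limit configured
+ //   }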
+
+ // Used both in a GC and on the allocator code paths when heap_hard_limit is non zero
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY CLRCriticalSection check_commit_cs;
+
+ // Indicates whether to use large pages. This only works if a hard limit is also enabled.
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p;
+
+#ifdef MULTIPLE_HEAPS
+ // Init-ed in gc_heap::initialize_gc
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY gc_heap** g_heaps;
+
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool gc_thread_no_affinitize_p;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_gen0_balance_delta;
+
+#define alloc_quantum_balance_units (16)
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_balance_threshold;
+
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t max_decommit_step_size;
+#else //MULTIPLE_HEAPS
+#endif //MULTIPLE_HEAPS
+
+#ifdef BACKGROUND_GC
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool gc_can_use_concurrent;
+
+#ifdef BGC_SERVO_TUNING
+ // This tells us why we chose to do a bgc in tuning.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int saved_bgc_tuning_reason;
+#endif //BGC_SERVO_TUNING
+#endif //BACKGROUND_GC
+
+#ifdef USE_REGIONS
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t regions_range;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool enable_special_regions_p;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint8_t* bookkeeping_covered_start;
+#else //USE_REGIONS
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t eph_gen_starts_size;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_segment_size;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_uoh_segment_size;
+#endif //USE_REGIONS
+
+#if defined(SHORT_PLUGS) && !defined(USE_REGIONS)
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY double short_plugs_pad_ratio;
+#endif //SHORT_PLUGS && !USE_REGIONS
+
+#ifdef FEATURE_LOH_COMPACTION
+ // This is for forced LOH compaction via the complus env var
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY BOOL loh_compaction_always_p;
+#endif //FEATURE_LOH_COMPACTION
+
+#ifdef HOST_64BIT
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t youngest_gen_desired_th;
+#endif //HOST_64BIT
+
+ /********************************************/
+ // PER_HEAP_ISOLATED_FIELD_DIAG_ONLY fields //
+ /********************************************/
+
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int gchist_index;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY gc_mechanisms_store gchist[max_history_count];
- // For testing only BEG
// pm_stress_on currently means (since we just have one mode) we
// randomly turn the mode on; and after a random # of NGC2s we
// turn it off.
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool pm_stress_on; // init-ed by the GCProvModeStress config
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t provisional_triggered_gc_count;
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t provisional_off_gc_count;
- // For testing only END
-
+ // Only used in dprintf
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t num_provisional_triggered;
- // For dprintf in do_pre_gc
- PER_HEAP_FIELD_DIAG_ONLY size_t allocated_since_last_gc[total_oh_count];
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY gc_history_global gc_data_global;
+
+ // This is what the GC uses for its own bookkeeping.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t current_total_committed_bookkeeping;
+
+ // For implementation of GCHeap::GetMemoryInfo which is called by
+ // the GC.GetGCMemoryInfo API
+ //
+ // We record the time GC work is done while the EE is suspended.
+ // suspended_start_time is captured right before we call
+ // SuspendEE. We omit the time between GC end and RestartEE
+ // because it's very short, and by the time we are calling it
+ // the settings may have changed, so we'd have to do more work
+ // to figure out which GC to record the info for.
+ //
+ // The complication is GCs triggered without their own
+ // SuspendEE: we record such a GC's duration as its pause
+ // duration, and attribute the rest to the GC that the SuspendEE
+ // was for. The ephemeral GC we might trigger at the beginning
+ // of a BGC and the PM-triggered full GCs fall into this case.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t suspended_start_time;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t end_gc_time;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t total_suspended_time;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t process_start_time;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY last_recorded_gc_info last_ephemeral_gc_info;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY last_recorded_gc_info last_full_blocking_gc_info;
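+ // As a sketch of how these fields combine (simplified, not the exact code):
+ //
+ //   pause duration ~= end_gc_time - suspended_start_time
+ //   total_suspended_time += pause duration
+ //
+ // and a pause-time-percentage style metric can then be derived against
+ // (end_gc_time - process_start_time).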
#ifdef BACKGROUND_GC
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY gc_history_global bgc_data_global;
+
// For the CollectionCount API
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t ephemeral_fgc_counts[max_generation];
- PER_HEAP_FIELD_SINGLE_GC uint8_t* next_sweep_obj;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* current_sweep_pos;
-#ifdef DOUBLY_LINKED_FL
- PER_HEAP_FIELD_SINGLE_GC heap_segment* current_sweep_seg;
-#endif //DOUBLY_LINKED_FL
+ // For implementation of GCHeap::GetMemoryInfo which is called by
+ // the GC.GetGCMemoryInfo API
+ //
+ // If the user didn't specify which kind of GC info to return, we need
+ // to return the last recorded one. There's a complication with BGC as
+ // BGC end runs concurrently. If 2 BGCs run back to back, we can't have
+ // one update the info while the user thread is reading it (and we'd
+ // still like to return the last BGC info; otherwise, if we only did
+ // BGCs, we could frequently return nothing). So we maintain 2 of these
+ // for BGC, and the older one is guaranteed to be consistent.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY last_recorded_gc_info last_bgc_info[2];
+ // This is either 0 or 1.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY VOLATILE(int) last_bgc_info_index;
+ // Since a BGC can finish later than blocking GCs with larger indices,
+ // we can't just compare the index recorded in the GC info. We use this
+ // to know whether we should be looking for a BGC info or a blocking
+ // GC's, if the user asks for the latest GC info of any kind.
+ // This can only go from false to true concurrently, so if it is true,
+ // it means the BGC info is ready.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY VOLATILE(bool) is_last_recorded_bgc;
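+ // A sketch of the double buffering above (for illustration only; the
+ // exact update protocol lives in the implementation): the writer fills
+ // in the slot readers aren't using and flips the index only once that
+ // slot is complete, so a reader can safely do -
+ //
+ //   const last_recorded_gc_info* stable = &last_bgc_info[last_bgc_info_index];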
#endif //BACKGROUND_GC
- PER_HEAP_FIELD_DIAG_ONLY fgm_history fgm_result;
-
-#ifndef USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t eph_gen_starts_size;
-#endif //!USE_REGIONS
-
-#ifdef GC_CONFIG_DRIVEN
- // 0 stores compacting GCs;
- // 1 stores sweeping GCs;
- PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t compact_or_sweep_gcs[2];
+#ifdef FEATURE_EVENT_TRACE
+ // Initialized each time in mark_phase and background_mark_phase (during the 2nd non-concurrent stage)
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool informational_event_enabled_p;
- PER_HEAP_FIELD_DIAG_ONLY size_t interesting_data_per_gc[max_idp_count];
-#endif //GC_CONFIG_DRIVEN
+ // Time is all in microseconds here. These are times recorded during STW.
+ //
+ // Note that the goal of this is not to show every single type of root.
+ // For that you have the per-heap MarkWithType events. This takes
+ // advantage of the joins we already have and naturally gets the time
+ // between each join.
+ enum etw_gc_time_info
+ {
+ time_mark_sizedref = 0,
+ // Note time_mark_roots does not include scanning sizedref handles.
+ time_mark_roots = 1,
+ time_mark_short_weak = 2,
+ time_mark_scan_finalization = 3,
+ time_mark_long_weak = 4,
+ max_bgc_time_type = 5,
+ time_plan = 5,
+ time_relocate = 6,
+ time_sweep = 6,
+ max_sweep_time_type = 7,
+ time_compact = 7,
+ max_compact_time_type = 8
+ };
- // TODO: should just get rid of this for regions.
- PER_HEAP_FIELD_SINGLE_GC BOOL ro_segments_in_range;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t* gc_time_info;
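+ // The overlapping enum values above are intentional: indices 0-4 are
+ // common to all flavors, a BGC records only those (max_bgc_time_type),
+ // a sweeping GC reuses index 6 for time_sweep, and a compacting GC
+ // uses indices 5-7. A hypothetical consumer (sketch only; compact_p is
+ // an assumed local) could read -
+ //
+ //   uint64_t plan_us = gc_time_info[time_plan];
+ //   uint64_t last_us = compact_p ? gc_time_info[time_compact]
+ //                                : gc_time_info[time_sweep];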
#ifdef BACKGROUND_GC
- // This is maintained in the way that BGC will add to it and it will be changed
- // during the first blocking GC happens after this BGC is finished.
- PER_HEAP_FIELD_MAINTAINED heap_segment* freeable_soh_segment;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY uint64_t* bgc_time_info;
#endif //BACKGROUND_GC
- // This is maintained as BGC can add to it and it can be changed by the next blocking
- // GC that happens. it can also be added to during a blocking GC and will be freed later
- // during that GC.
- PER_HEAP_FIELD_MAINTAINED heap_segment* freeable_uoh_segment;
+#ifdef FEATURE_LOH_COMPACTION
+ // This records the LOH compaction info -
+ // the time it takes to plan, relocate and compact.
+ // We want to see how reference-rich large objects are so
+ // we also record ref info. Survived bytes are already recorded
+ // in the gc_generation_data of the per-heap history event.
+ //
+ // If we don't end up actually doing LOH compaction because the plan
+ // phase failed, the times will all be 0.
+ struct etw_loh_compact_info
+ {
+ uint32_t time_plan;
+ uint32_t time_compact;
+ uint32_t time_relocate;
+ size_t total_refs;
+ size_t zero_refs;
+ };
-#ifdef USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_MAINTAINED region_free_list global_regions_to_decommit[count_free_region_kinds];
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY etw_loh_compact_info* loh_compact_info;
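+ // Presumably indexed by heap number (an assumption, for illustration) -
+ //
+ //   etw_loh_compact_info& info = loh_compact_info[heap_number];
+ //   uint32_t total_time = info.time_plan + info.time_relocate + info.time_compact;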
+#endif //FEATURE_LOH_COMPACTION
- PER_HEAP_ISOLATED_FIELD_MAINTAINED region_free_list global_free_huge_regions;
-#else //USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_MAINTAINED heap_segment* segment_standby_list;
+ // Config values, only init-ed once at the beginning.
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t physical_memory_from_config;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t gen0_min_budget_from_config;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t gen0_max_budget_from_config;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY int high_mem_percent_from_config;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool use_frozen_segments_p;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY bool hard_limit_config_p;
+#endif //FEATURE_EVENT_TRACE
- PER_HEAP_FIELD_SINGLE_GC size_t ordered_free_space_indices[MAX_NUM_BUCKETS];
- PER_HEAP_FIELD_SINGLE_GC size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS];
- PER_HEAP_FIELD_SINGLE_GC size_t ordered_plug_indices[MAX_NUM_BUCKETS];
- PER_HEAP_FIELD_SINGLE_GC size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS];
- PER_HEAP_FIELD_SINGLE_GC BOOL ordered_plug_indices_init;
- PER_HEAP_FIELD_SINGLE_GC BOOL use_bestfit;
- PER_HEAP_FIELD_SINGLE_GC uint8_t* bestfit_first_pin;
- PER_HEAP_FIELD_SINGLE_GC BOOL commit_end_of_seg;
- PER_HEAP_FIELD_SINGLE_GC size_t max_free_space_items; // dynamically adjusted.
- PER_HEAP_FIELD_SINGLE_GC size_t free_space_buckets;
- PER_HEAP_FIELD_SINGLE_GC size_t free_space_items;
- // -1 means we are using all the free
- // spaces we have (not including
- // end of seg space).
- PER_HEAP_FIELD_SINGLE_GC int trimmed_free_space_index;
- PER_HEAP_FIELD_SINGLE_GC size_t total_ephemeral_plugs;
- PER_HEAP_FIELD_SINGLE_GC seg_free_spaces* bestfit_seg;
- // Note: we know this from the plan phase.
- // total_ephemeral_plugs actually has the same value
- // but while we are calculating its value we also store
- // info on how big the plugs are for best fit which we
- // don't do in plan phase.
- // TODO: get rid of total_ephemeral_plugs.
- PER_HEAP_FIELD_SINGLE_GC size_t total_ephemeral_size;
-#endif //USE_REGIONS
+#ifdef HEAP_BALANCE_INSTRUMENTATION
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t last_gc_end_time_us;
+#endif //HEAP_BALANCE_INSTRUMENTATION
-public:
+#ifdef GC_CONFIG_DRIVEN
+ // 0 stores compacting GCs;
+ // 1 stores sweeping GCs;
+ PER_HEAP_ISOLATED_FIELD_DIAG_ONLY size_t compact_or_sweep_gcs[2];
+#endif //GC_CONFIG_DRIVEN
#ifdef HEAP_ANALYZE
-
PER_HEAP_ISOLATED_FIELD_DIAG_ONLY BOOL heap_analyze_enabled;
- PER_HEAP_FIELD_DIAG_ONLY size_t internal_root_array_length;
-
- // next two fields are used to optimize the search for the object
- // enclosing the current reference handled by ha_mark_object_simple.
- PER_HEAP_FIELD_DIAG_ONLY uint8_t* current_obj;
- PER_HEAP_FIELD_DIAG_ONLY size_t current_obj_size;
-
#endif //HEAP_ANALYZE
-public:
-
- PER_HEAP_FIELD_SINGLE_GC int condemned_generation_num;
+ /***************************************************/
+ // Fields that don't fit into the above categories //
+ /***************************************************/
- PER_HEAP_FIELD_SINGLE_GC BOOL blocking_collection;
-
-#ifdef MULTIPLE_HEAPS
- // Init-ed in GCHeap::Initialize
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY int n_heaps;
- // Init-ed in gc_heap::initialize_gc
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY gc_heap** g_heaps;
+ // See comments in reset_memory. Can be reset to TRUE at any time.
+ PER_HEAP_ISOLATED_FIELD BOOL reset_mm_p;
-#if !defined(USE_REGIONS) || defined(_DEBUG)
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t* g_promoted;
-#endif //!USE_REGIONS || _DEBUG
+ // Full GC Notification percentages. These are set by the RegisterForFullGCNotification API
+ PER_HEAP_FIELD uint32_t fgn_maxgen_percent;
+ PER_HEAP_FIELD size_t fgn_last_alloc;
+ PER_HEAP_ISOLATED_FIELD uint32_t fgn_loh_percent;
+ PER_HEAP_ISOLATED_FIELD VOLATILE(bool) full_gc_approach_event_set;
#ifdef BACKGROUND_GC
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t* g_bpromoted;
-#endif //BACKGROUND_GC
+ // Changed in a single GC and reset outside GC in API implementation.
+ PER_HEAP_ISOLATED_FIELD BOOL fgn_last_gc_was_concurrent;
-#ifdef MH_SC_MARK
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC int* g_mark_stack_busy;
-#endif //MH_SC_MARK
-#else //MULTIPLE_HEAPS
-#if !defined(USE_REGIONS) || defined(_DEBUG)
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t g_promoted;
-#endif //!USE_REGIONS || _DEBUG
-#ifdef BACKGROUND_GC
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC size_t g_bpromoted;
+ // Only changed by API
+ PER_HEAP_ISOLATED_FIELD bool temp_disable_concurrent_p;
#endif //BACKGROUND_GC
-#endif //MULTIPLE_HEAPS
-
- // For segments this is maintained; for regions it's just called during init
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t reserved_memory;
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t reserved_memory_limit;
-
- PER_HEAP_ISOLATED_FIELD_SINGLE_GC BOOL g_low_memory_status;
-#ifdef FEATURE_CARD_MARKING_STEALING
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(uint32_t) card_mark_chunk_index_soh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(bool) card_mark_done_soh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(uint32_t) card_mark_chunk_index_loh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(uint32_t) card_mark_chunk_index_poh;
- PER_HEAP_FIELD_SINGLE_GC VOLATILE(bool) card_mark_done_uoh;
+public:
- PER_HEAP_METHOD void reset_card_marking_enumerators()
- {
- // set chunk index to all 1 bits so that incrementing it yields 0 as the first index
- card_mark_chunk_index_soh = ~0;
- card_mark_done_soh = false;
+ /***************************************************************************************************/
+ // public methods //
+ /***************************************************************************************************/
+ PER_HEAP_ISOLATED_METHOD heap_segment* make_heap_segment(uint8_t* new_pages,
+ size_t size,
+ gc_heap* hp,
+ int gen_num);
- card_mark_chunk_index_loh = ~0;
- card_mark_chunk_index_poh = ~0;
- card_mark_done_uoh = false;
- }
+ // Returns TRUE if the current thread used to be in cooperative mode
+ // before calling this function.
+ PER_HEAP_ISOLATED_METHOD bool enable_preemptive();
+ PER_HEAP_ISOLATED_METHOD void disable_preemptive(bool restore_cooperative);
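+ // Typical pairing (a sketch; actual call sites vary) -
+ //
+ //   bool cooperative_mode = enable_preemptive ();
+ //   // ... wait, or do work that may block ...
+ //   disable_preemptive (cooperative_mode);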
- PER_HEAP_METHOD bool find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg,
- size_t& n_card_set, uint8_t*& start_address, uint8_t*& limit,
- size_t& card, size_t& end_card, size_t& card_word_end);
-#endif //FEATURE_CARD_MARKING_STEALING
+ PER_HEAP_ISOLATED_METHOD uint32_t wait_for_gc_done(int32_t timeOut = INFINITE);
- PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t smoothed_desired_per_heap[total_generation_count];
+ /***************************************************************************************************/
+ // public fields //
+ /***************************************************************************************************/
+ PER_HEAP_ISOLATED_FIELD_SINGLE_GC VOLATILE(BOOL) gc_started;
- PER_HEAP_ISOLATED_METHOD size_t exponential_smoothing (int gen, size_t collection_count, size_t desired_per_heap);
+ // For regions this is the shift for the region size (log2 of the region size).
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t min_segment_size_shr;
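+ // E.g. (a sketch, assuming the size is a power of 2) -
+ //
+ //   size_t index = (size_t)addr >> min_segment_size_shr;
+ //
+ // maps an address to its slot in size-granularity bookkeeping tables.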
- PER_HEAP_ISOLATED_METHOD BOOL dt_high_memory_load_p();
+ // For segments this is maintained; for regions it's just set during init
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t reserved_memory;
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY size_t reserved_memory_limit;
-protected:
- PER_HEAP_METHOD void update_collection_counts ();
+#ifdef MULTIPLE_HEAPS
+ // Init-ed in GCHeap::Initialize
+ PER_HEAP_ISOLATED_FIELD_INIT_ONLY int n_heaps;
+#endif //MULTIPLE_HEAPS
- // maintained as we need to grow bookkeeping data.
- PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t card_table_element_layout[total_bookkeeping_elements + 1];
+#ifdef FEATURE_BASICFREEZE
+ PER_HEAP_ISOLATED_FIELD_MAINTAINED sorted_table* seg_table;
+#endif //FEATURE_BASICFREEZE
-#ifdef USE_REGIONS
- PER_HEAP_ISOLATED_FIELD_INIT_ONLY uint8_t* bookkeeping_covered_start;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED uint8_t* bookkeeping_covered_committed;
- PER_HEAP_ISOLATED_FIELD_MAINTAINED size_t bookkeeping_sizes[total_bookkeeping_elements];
-#endif //USE_REGIONS
- PER_HEAP_FIELD_SINGLE_GC mark_queue_t mark_queue;
}; // class gc_heap
#ifdef FEATURE_PREMORTEM_FINALIZATION