From: Sean Gillespie
Date: Tue, 28 Mar 2017 16:59:04 +0000 (-0700)
Subject: [Local GC] Use standard C++ types (bool) and consistent types (void*) on the interfac...
X-Git-Tag: submit/tizen/20210909.063632~11030^2~7529
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ef9c0f3e37ff8d37f9569ad4ed5cffec2168f535;p=platform%2Fupstream%2Fdotnet%2Fruntime.git

[Local GC] Use standard C++ types (bool) and consistent types (void*) on the interface (dotnet/coreclr#10463)

* [Local GC] BOOL -> bool on IGCHeap
* [Local GC] size_t -> void* on IGCHeap
* [Local GC] Silence warnings by being explicit about BOOL -> bool conversions
* Address code review feedback: FinalizeAppDomain BOOL -> bool
* Fix warnings
* Address code review feedback: 1) Fix a missed default parameter (FALSE) on a parameter of type bool, 2) Fix invocations of the diagnostic callbacks to use boolean literals instead of TRUE and FALSE, 3) Fix various invocations of GC interface methods in the VM to use boolean literals instead of TRUE and FALSE
* Address code review feedback: fix inconsistency

Commit migrated from https://github.com/dotnet/coreclr/commit/7e4afb4fbf900b789f53ccb816c6ddba7807de68
---
diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp
index bacef7f..31ceeca 100644
--- a/src/coreclr/src/gc/gc.cpp
+++ b/src/coreclr/src/gc/gc.cpp
@@ -5857,7 +5857,7 @@ struct fix_alloc_context_args
 void fix_alloc_context(gc_alloc_context* acontext, void* param)
 {
     fix_alloc_context_args* args = (fix_alloc_context_args*)param;
-    g_theGCHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
+    g_theGCHeap->FixAllocContext(acontext, false, (void*)(size_t)(args->for_gc_p), args->heap);
 }
 
 void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
@@ -21332,7 +21332,7 @@ void gc_heap::relocate_in_loh_compact()
                  generation_free_obj_space (gen)));
 }
 
-void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn)
+void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
 {
     generation* gen = large_object_generation;
     heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -21362,7 +21362,7 @@ void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn
 
             STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
 
-            fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE);
+            fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);
 
             o = o + size;
             if (o < heap_segment_allocated (seg))
@@ -24177,7 +24177,7 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
         STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
         ptrdiff_t reloc = settings.compaction ?
last_plug_relocation : 0; - (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE); + (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false); if (check_last_object_p) { @@ -24245,7 +24245,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args) } } -void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn) +void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn) { generation* condemned_gen = generation_of (settings.condemned_generation); uint8_t* start_address = generation_allocation_start (condemned_gen); @@ -24301,7 +24301,7 @@ void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn) } } -void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type) +void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type) { if (type == walk_for_gc) walk_survivors_relocation (context, fn); @@ -24316,7 +24316,7 @@ void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type } #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) -void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn) +void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn) { // This should only be called for BGCs assert(settings.concurrent); @@ -24377,8 +24377,8 @@ void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn f plug_end, 0, // Reloc distance == 0 as this is non-compacting profiling_context, - FALSE, // Non-compacting - TRUE); // BGC + false, // Non-compacting + true); // BGC } seg = heap_segment_next (seg); @@ -30863,7 +30863,7 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp) return m; } -void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn) +void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn) { // Now walk the portion of memory that is actually being relocated. 
walk_relocation (profiling_context, fn); @@ -30876,7 +30876,7 @@ void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_f #endif //FEATURE_LOH_COMPACTION } -void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn) +void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn) { generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen));; @@ -30914,7 +30914,7 @@ void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn f plug_end = o; - fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE); + fn (plug_start, plug_end, 0, profiling_context, false, false); } else { @@ -33750,7 +33750,7 @@ HRESULT GCHeap::Initialize () //// // GC callback functions -BOOL GCHeap::IsPromoted(Object* object) +bool GCHeap::IsPromoted(Object* object) { #ifdef _DEBUG ((CObjectHeader*)object)->Validate(); @@ -33769,7 +33769,7 @@ BOOL GCHeap::IsPromoted(Object* object) #ifdef BACKGROUND_GC if (gc_heap::settings.concurrent) { - BOOL is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))|| + bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))|| hp->background_marked (o)); return is_marked; } @@ -33810,11 +33810,11 @@ unsigned int GCHeap::WhichGeneration (Object* object) return g; } -BOOL GCHeap::IsEphemeral (Object* object) +bool GCHeap::IsEphemeral (Object* object) { uint8_t* o = (uint8_t*)object; gc_heap* hp = gc_heap::heap_of (o); - return hp->ephemeral_pointer_p (o); + return !!hp->ephemeral_pointer_p (o); } // Return NULL if can't find next object. When EE is not suspended, @@ -33888,7 +33888,7 @@ BOOL GCHeap::IsInFrozenSegment (Object * object) #endif //VERIFY_HEAP // returns TRUE if the pointer is in one of the GC heaps. -BOOL GCHeap::IsHeapPointer (void* vpObject, BOOL small_heap_only) +bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only) { STATIC_CONTRACT_SO_TOLERANT; @@ -34059,7 +34059,7 @@ void GCHeap::Relocate (Object** ppObject, ScanContext* sc, STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0)); } -/*static*/ BOOL GCHeap::IsObjectInFixedHeap(Object *pObj) +/*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj) { // For now we simply look at the size of the object to determine if it in the // fixed heap or not. 
If the bit indicating this gets set at some point @@ -34105,7 +34105,7 @@ int StressRNG(int iMaxValue) // free up object so that things will move and then do a GC //return TRUE if GC actually happens, otherwise FALSE -BOOL GCHeap::StressHeap(gc_alloc_context * context) +bool GCHeap::StressHeap(gc_alloc_context * context) { #if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) alloc_context* acontext = static_cast(context); @@ -34603,7 +34603,7 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_ } void -GCHeap::FixAllocContext (gc_alloc_context* context, BOOL lockp, void* arg, void *heap) +GCHeap::FixAllocContext (gc_alloc_context* context, bool lockp, void* arg, void *heap) { alloc_context* acontext = static_cast(context); #ifdef MULTIPLE_HEAPS @@ -34681,7 +34681,7 @@ BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p) // API to ensure that a complete new garbage collection takes place // HRESULT -GCHeap::GarbageCollect (int generation, BOOL low_memory_p, int mode) +GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode) { #if defined(BIT64) if (low_memory_p) @@ -35512,7 +35512,7 @@ void GCHeap::SetLOHCompactionMode (int newLOHCompactionyMode) #endif //FEATURE_LOH_COMPACTION } -BOOL GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage, +bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) { #ifdef MULTIPLE_HEAPS @@ -35535,7 +35535,7 @@ BOOL GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage, return TRUE; } -BOOL GCHeap::CancelFullGCNotification() +bool GCHeap::CancelFullGCNotification() { pGenGCHeap->fgn_maxgen_percent = 0; pGenGCHeap->fgn_loh_percent = 0; @@ -35562,7 +35562,7 @@ int GCHeap::WaitForFullGCComplete(int millisecondsTimeout) return result; } -int GCHeap::StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) +int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) { NoGCRegionLockHolder lh; @@ -35640,7 +35640,7 @@ HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters) } // Get the segment size to use, making sure it conforms. 
-size_t GCHeap::GetValidSegmentSize(BOOL large_seg) +size_t GCHeap::GetValidSegmentSize(bool large_seg) { return get_valid_segment_size (large_seg); } @@ -35764,15 +35764,15 @@ size_t GCHeap::GetFinalizablePromotedCount() #endif //MULTIPLE_HEAPS } -BOOL GCHeap::FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers) +bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers) { #ifdef MULTIPLE_HEAPS - BOOL foundp = FALSE; + bool foundp = false; for (int hn = 0; hn < gc_heap::n_heaps; hn++) { gc_heap* hp = gc_heap::g_heaps [hn]; if (hp->finalize_queue->FinalizeAppDomain (pDomain, fRunFinalizers)) - foundp = TRUE; + foundp = true; } return foundp; @@ -35781,13 +35781,13 @@ BOOL GCHeap::FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers) #endif //MULTIPLE_HEAPS } -BOOL GCHeap::ShouldRestartFinalizerWatchDog() +bool GCHeap::ShouldRestartFinalizerWatchDog() { // This condition was historically used as part of the condition to detect finalizer thread timeouts return gc_heap::gc_lock.lock != -1; } -void GCHeap::SetFinalizeQueueForShutdown(BOOL fHasLock) +void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock) { #ifdef MULTIPLE_HEAPS for (int hn = 0; hn < gc_heap::n_heaps; hn++) @@ -36173,10 +36173,10 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain, return finalizedFound; } -BOOL -CFinalize::FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers) +bool +CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers) { - BOOL finalizedFound = FALSE; + bool finalizedFound = false; unsigned int startSeg = gen_segment (max_generation); @@ -36186,7 +36186,7 @@ CFinalize::FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers) { if (FinalizeSegForAppDomain (pDomain, fRunFinalizers, Seg)) { - finalizedFound = TRUE; + finalizedFound = true; } } @@ -36594,13 +36594,13 @@ void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context) } } -void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) +void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type) { gc_heap* hp = (gc_heap*)gc_context; hp->walk_survivors (fn, diag_context, type); } -void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) +void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) { gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p); } @@ -36920,7 +36920,7 @@ void GCHeap::TemporaryDisableConcurrentGC() #endif //BACKGROUND_GC } -BOOL GCHeap::IsConcurrentGCEnabled() +bool GCHeap::IsConcurrentGCEnabled() { #ifdef BACKGROUND_GC return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p)); diff --git a/src/coreclr/src/gc/gc.h b/src/coreclr/src/gc/gc.h index d521f93..478e8cd 100644 --- a/src/coreclr/src/gc/gc.h +++ b/src/coreclr/src/gc/gc.h @@ -220,13 +220,13 @@ public: return IGCHeap::maxGeneration; } - BOOL IsValidSegmentSize(size_t cbSize) + bool IsValidSegmentSize(size_t cbSize) { //Must be aligned on a Mb and greater than 4Mb return (((cbSize & (1024*1024-1)) ==0) && (cbSize >> 22)); } - BOOL IsValidGen0MaxSize(size_t cbSize) + bool IsValidGen0MaxSize(size_t cbSize) { return (cbSize >= 64*1024); } @@ -263,7 +263,7 @@ extern void FinalizeWeakReference(Object * obj); extern IGCHeapInternal* g_theGCHeap; #ifndef DACCESS_COMPILE -inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE) +inline bool IsGCInProgress(bool bConsiderGCStart = false) { 
WRAPPER_NO_CONTRACT; @@ -271,7 +271,7 @@ inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE) } #endif // DACCESS_COMPILE -inline BOOL IsServerHeap() +inline bool IsServerHeap() { LIMITED_METHOD_CONTRACT; #ifdef FEATURE_SVR_GC diff --git a/src/coreclr/src/gc/gcee.cpp b/src/coreclr/src/gc/gcee.cpp index 6513fde..a736a59 100644 --- a/src/coreclr/src/gc/gcee.cpp +++ b/src/coreclr/src/gc/gcee.cpp @@ -381,12 +381,12 @@ size_t GCHeap::GetNow() return GetHighPrecisionTimeStamp(); } -BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart) +bool GCHeap::IsGCInProgressHelper (bool bConsiderGCStart) { return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE); } -uint32_t GCHeap::WaitUntilGCComplete(BOOL bConsiderGCStart) +uint32_t GCHeap::WaitUntilGCComplete(bool bConsiderGCStart) { if (bConsiderGCStart) { @@ -427,7 +427,7 @@ BlockAgain: return dwWaitResult; } -void GCHeap::SetGCInProgress(BOOL fInProgress) +void GCHeap::SetGCInProgress(bool fInProgress) { GcInProgress = fInProgress; } @@ -445,12 +445,12 @@ void GCHeap::WaitUntilConcurrentGCComplete() #endif //BACKGROUND_GC } -BOOL GCHeap::IsConcurrentGCInProgress() +bool GCHeap::IsConcurrentGCInProgress() { #ifdef BACKGROUND_GC - return pGenGCHeap->settings.concurrent; + return !!pGenGCHeap->settings.concurrent; #else - return FALSE; + return false; #endif //BACKGROUND_GC } diff --git a/src/coreclr/src/gc/gcimpl.h b/src/coreclr/src/gc/gcimpl.h index e0008b9..2a51d47 100644 --- a/src/coreclr/src/gc/gcimpl.h +++ b/src/coreclr/src/gc/gcimpl.h @@ -85,11 +85,11 @@ public: void DiagTraceGCSegments (); void PublishObject(uint8_t* obj); - BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE); + bool IsGCInProgressHelper (bool bConsiderGCStart = false); - uint32_t WaitUntilGCComplete (BOOL bConsiderGCStart = FALSE); + uint32_t WaitUntilGCComplete (bool bConsiderGCStart = false); - void SetGCInProgress(BOOL fInProgress); + void SetGCInProgress(bool fInProgress); bool RuntimeStructuresValid(); @@ -106,7 +106,7 @@ public: Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags); void FixAllocContext (gc_alloc_context* acontext, - BOOL lockp, void* arg, void *heap); + bool lockp, void* arg, void *heap); Object* GetContainingObject(void *pInteriorPtr, bool fCollectedGenOnly); @@ -121,15 +121,15 @@ public: void HideAllocContext(alloc_context*); void RevealAllocContext(alloc_context*); - BOOL IsObjectInFixedHeap(Object *pObj); + bool IsObjectInFixedHeap(Object *pObj); - HRESULT GarbageCollect (int generation = -1, BOOL low_memory_p=FALSE, int mode=collection_blocking); + HRESULT GarbageCollect (int generation = -1, bool low_memory_p=false, int mode=collection_blocking); //// // GC callback functions // Check if an argument is promoted (ONLY CALL DURING // THE PROMOTIONSGRANTED CALLBACK.) 
- BOOL IsPromoted (Object *object); + bool IsPromoted (Object *object); size_t GetPromotedBytes (int heap_index); @@ -157,8 +157,8 @@ public: //returns the generation number of an object (not valid during relocation) unsigned WhichGeneration (Object* object); // returns TRUE is the object is ephemeral - BOOL IsEphemeral (Object* object); - BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE); + bool IsEphemeral (Object* object); + bool IsHeapPointer (void* object, bool small_heap_only = false); void ValidateObjectMember (Object *obj); @@ -173,13 +173,13 @@ public: int GetLOHCompactionMode(); void SetLOHCompactionMode(int newLOHCompactionyMode); - BOOL RegisterForFullGCNotification(uint32_t gen2Percentage, + bool RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage); - BOOL CancelFullGCNotification(); + bool CancelFullGCNotification(); int WaitForFullGCApproach(int millisecondsTimeout); int WaitForFullGCComplete(int millisecondsTimeout); - int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC); + int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC); int EndNoGCRegion(); unsigned GetGcCount(); @@ -189,7 +189,7 @@ public: PER_HEAP_ISOLATED HRESULT GetGcCounters(int gen, gc_counters* counters); - size_t GetValidSegmentSize(BOOL large_seg = FALSE); + size_t GetValidSegmentSize(bool large_seg = false); static size_t GetValidGen0MaxSize(size_t seg_size); @@ -199,9 +199,9 @@ public: PER_HEAP_ISOLATED size_t GetNumberFinalizableObjects(); PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount(); - void SetFinalizeQueueForShutdown(BOOL fHasLock); - BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers); - BOOL ShouldRestartFinalizerWatchDog(); + void SetFinalizeQueueForShutdown(bool fHasLock); + bool FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers); + bool ShouldRestartFinalizerWatchDog(); void DiagWalkObject (Object* obj, walk_fn fn, void* context); void SetFinalizeRunOnShutdown(bool value); @@ -235,12 +235,12 @@ public: // FIX #ifndef DACCESS_COMPILE HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout); // Use in native threads. TRUE if succeed. 
FALSE if failed or timeout #endif - BOOL IsConcurrentGCInProgress(); + bool IsConcurrentGCInProgress(); // Enable/disable concurrent GC void TemporaryEnableConcurrentGC(); void TemporaryDisableConcurrentGC(); - BOOL IsConcurrentGCEnabled(); + bool IsConcurrentGCEnabled(); PER_HEAP_ISOLATED CLREvent *WaitForGCEvent; // used for syncing w/GC @@ -259,7 +259,7 @@ private: } public: //return TRUE if GC actually happens, otherwise FALSE - BOOL StressHeap(gc_alloc_context * acontext = 0); + bool StressHeap(gc_alloc_context * acontext); #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #ifdef STRESS_HEAP @@ -279,7 +279,7 @@ protected: virtual void DiagDescrGenerations (gen_walk_fn fn, void *context); - virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type); + virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type); virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn); @@ -289,7 +289,7 @@ protected: virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context); - virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p); + virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p); public: Object * NextObj (Object * object); diff --git a/src/coreclr/src/gc/gcinterface.h b/src/coreclr/src/gc/gcinterface.h index 215f6ce..7aae605 100644 --- a/src/coreclr/src/gc/gcinterface.h +++ b/src/coreclr/src/gc/gcinterface.h @@ -378,12 +378,12 @@ typedef enum HNDTYPE_WEAK_WINRT = 9 } HandleType; -typedef BOOL (* walk_fn)(Object*, void*); +typedef bool (* walk_fn)(Object*, void*); typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved); -typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p); -typedef void (* fq_walk_fn)(BOOL, void*); +typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, void* context, bool compacting_p, bool bgc_p); +typedef void (* fq_walk_fn)(bool, void*); typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags); -typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent); +typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent); // IGCHeap is the interface that the VM will use when interacting with the GC. class IGCHeap { @@ -398,13 +398,13 @@ public: */ // Returns whether or not the given size is a valid segment size. - virtual BOOL IsValidSegmentSize(size_t size) = 0; + virtual bool IsValidSegmentSize(size_t size) = 0; // Returns whether or not the given size is a valid gen 0 max size. - virtual BOOL IsValidGen0MaxSize(size_t size) = 0; + virtual bool IsValidGen0MaxSize(size_t size) = 0; // Gets a valid segment size. - virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0; + virtual size_t GetValidSegmentSize(bool large_seg = false) = 0; // Sets the limit for reserved virtual memory. virtual void SetReservedVMLimit(size_t vmlimit) = 0; @@ -424,7 +424,7 @@ public: virtual void WaitUntilConcurrentGCComplete() = 0; // Returns true if a concurrent GC is in progress, false otherwise. 
- virtual BOOL IsConcurrentGCInProgress() = 0; + virtual bool IsConcurrentGCInProgress() = 0; // Temporarily enables concurrent GC, used during profiling. virtual void TemporaryEnableConcurrentGC() = 0; @@ -433,7 +433,7 @@ public: virtual void TemporaryDisableConcurrentGC() = 0; // Returns whether or not Concurrent GC is enabled. - virtual BOOL IsConcurrentGCEnabled() = 0; + virtual bool IsConcurrentGCEnabled() = 0; // Wait for a concurrent GC to complete if one is in progress, with the given timeout. virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0; // Use in native threads. TRUE if succeed. FALSE if failed or timeout @@ -447,17 +447,17 @@ public: */ // Finalizes an app domain by finalizing objects within that app domain. - virtual BOOL FinalizeAppDomain(AppDomain* pDomain, BOOL fRunFinalizers) = 0; + virtual bool FinalizeAppDomain(AppDomain* pDomain, bool fRunFinalizers) = 0; // Finalizes all registered objects for shutdown, even if they are still reachable. - virtual void SetFinalizeQueueForShutdown(BOOL fHasLock) = 0; + virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0; // Gets the number of finalizable objects. virtual size_t GetNumberOfFinalizable() = 0; // Traditionally used by the finalizer thread on shutdown to determine // whether or not to time out. Returns true if the GC lock has not been taken. - virtual BOOL ShouldRestartFinalizerWatchDog() = 0; + virtual bool ShouldRestartFinalizerWatchDog() = 0; // Gets the next finalizable object. virtual Object* GetNextFinalizable() = 0; @@ -490,10 +490,10 @@ public: // Registers for a full GC notification, raising a notification if the gen 2 or // LOH object heap thresholds are exceeded. - virtual BOOL RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0; + virtual bool RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0; // Cancels a full GC notification that was requested by `RegisterForFullGCNotification`. - virtual BOOL CancelFullGCNotification() = 0; + virtual bool CancelFullGCNotification() = 0; // Returns the status of a registered notification for determining whether a blocking // Gen 2 collection is about to be initiated, with the given timeout. @@ -514,7 +514,7 @@ public: // Begins a no-GC region, returning a code indicating whether entering the no-GC // region was successful. - virtual int StartNoGCRegion(uint64_t totalSize, BOOL lohSizeKnown, uint64_t lohSize, BOOL disallowFullBlockingGC) = 0; + virtual int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) = 0; // Exits a no-GC region. virtual int EndNoGCRegion() = 0; @@ -524,7 +524,7 @@ public: // Forces a garbage collection of the given generation. Also used extensively // throughout the VM. - virtual HRESULT GarbageCollect(int generation = -1, BOOL low_memory_p = FALSE, int mode = collection_blocking) = 0; + virtual HRESULT GarbageCollect(int generation = -1, bool low_memory_p = false, int mode = collection_blocking) = 0; // Gets the largest GC generation. Also used extensively throughout the VM. virtual unsigned GetMaxGeneration() = 0; @@ -546,16 +546,16 @@ public: virtual HRESULT Initialize() = 0; // Returns whether nor this GC was promoted by the last GC. - virtual BOOL IsPromoted(Object* object) = 0; + virtual bool IsPromoted(Object* object) = 0; // Returns true if this pointer points into a GC heap, false otherwise. 
- virtual BOOL IsHeapPointer(void* object, BOOL small_heap_only = FALSE) = 0; + virtual bool IsHeapPointer(void* object, bool small_heap_only = false) = 0; // Return the generation that has been condemned by the current GC. virtual unsigned GetCondemnedGeneration() = 0; // Returns whether or not a GC is in progress. - virtual BOOL IsGCInProgressHelper(BOOL bConsiderGCStart = FALSE) = 0; + virtual bool IsGCInProgressHelper(bool bConsiderGCStart = false) = 0; // Returns the number of GCs that have occured. Mainly used for // sanity checks asserting that a GC has not occured. @@ -566,20 +566,20 @@ public: virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0; // Returns whether or not this object resides in an ephemeral generation. - virtual BOOL IsEphemeral(Object* object) = 0; + virtual bool IsEphemeral(Object* object) = 0; // Blocks until a GC is complete, returning a code indicating the wait was successful. - virtual uint32_t WaitUntilGCComplete(BOOL bConsiderGCStart = FALSE) = 0; + virtual uint32_t WaitUntilGCComplete(bool bConsiderGCStart = false) = 0; // "Fixes" an allocation context by binding its allocation pointer to a // location on the heap. - virtual void FixAllocContext(gc_alloc_context* acontext, BOOL lockp, void* arg, void* heap) = 0; + virtual void FixAllocContext(gc_alloc_context* acontext, bool lockp, void* arg, void* heap) = 0; // Gets the total survived size plus the total allocated bytes on the heap. virtual size_t GetCurrentObjSize() = 0; // Sets whether or not a GC is in progress. - virtual void SetGCInProgress(BOOL fInProgress) = 0; + virtual void SetGCInProgress(bool fInProgress) = 0; // Gets whether or not the GC runtime structures are in a valid state for heap traversal. virtual bool RuntimeStructuresValid() = 0; @@ -642,7 +642,7 @@ public: =========================================================================== */ // Returns whether or not this object is in the fixed heap. - virtual BOOL IsObjectInFixedHeap(Object* pObj) = 0; + virtual bool IsObjectInFixedHeap(Object* pObj) = 0; // Walks an object and validates its members. virtual void ValidateObjectMember(Object* obj) = 0; @@ -669,10 +669,10 @@ public: virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0; // Walk the heap object by object. - virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0; + virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) = 0; // Walks the survivors and get the relocation information if objects have moved. - virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0; + virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type) = 0; // Walks the finalization queue. virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0; @@ -700,7 +700,7 @@ public: // Returns TRUE if GC actually happens, otherwise FALSE. The passed alloc context // must not be null. - virtual BOOL StressHeap(gc_alloc_context* acontext) = 0; + virtual bool StressHeap(gc_alloc_context* acontext) = 0; /* =========================================================================== @@ -753,8 +753,8 @@ struct ScanContext Thread* thread_under_crawl; int thread_number; uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read - BOOL promotion; //TRUE: Promotion, FALSE: Relocation. 
- BOOL concurrent; //TRUE: concurrent scanning + bool promotion; //TRUE: Promotion, FALSE: Relocation. + bool concurrent; //TRUE: concurrent scanning #if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE) AppDomain *pCurrentDomain; #endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE @@ -775,8 +775,8 @@ struct ScanContext thread_under_crawl = 0; thread_number = -1; stack_limit = 0; - promotion = FALSE; - concurrent = FALSE; + promotion = false; + concurrent = false; #ifdef GC_PROFILING pMD = NULL; #endif //GC_PROFILING diff --git a/src/coreclr/src/gc/gcpriv.h b/src/coreclr/src/gc/gcpriv.h index b929198..92868bb 100644 --- a/src/coreclr/src/gc/gcpriv.h +++ b/src/coreclr/src/gc/gcpriv.h @@ -1298,19 +1298,19 @@ protected: uint8_t* last_plug; BOOL is_shortened; mark* pinned_plug_entry; - size_t profiling_context; + void* profiling_context; record_surv_fn fn; }; PER_HEAP - void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type); + void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type); PER_HEAP void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args); PER_HEAP - void walk_relocation (size_t profiling_context, record_surv_fn fn); + void walk_relocation (void* profiling_context, record_surv_fn fn); PER_HEAP void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args); @@ -1320,14 +1320,14 @@ protected: #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) PER_HEAP - void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn); + void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn); #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) // used in blocking GCs after plan phase so this walks the plugs. PER_HEAP - void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn); + void walk_survivors_relocation (void* profiling_context, record_surv_fn fn); PER_HEAP - void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn); + void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn); PER_HEAP int generation_to_condemn (int n, @@ -2168,7 +2168,7 @@ protected: void relocate_in_loh_compact(); PER_HEAP - void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn); + void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn); PER_HEAP BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len); @@ -3767,7 +3767,7 @@ public: void DiscardNonCriticalObjects(); //Methods used by the app domain unloading call to finalize objects in an app domain - BOOL FinalizeAppDomain (AppDomain *pDomain, BOOL fRunFinalizers); + bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers); void CheckFinalizerObjects(); diff --git a/src/coreclr/src/gc/gcscan.h b/src/coreclr/src/gc/gcscan.h index 306d5f2..c7060f3 100644 --- a/src/coreclr/src/gc/gcscan.h +++ b/src/coreclr/src/gc/gcscan.h @@ -92,11 +92,4 @@ class GCScan static VOLATILE(int32_t) m_GcStructuresInvalidCnt; }; -// These two functions are utilized to scan the heap if requested by ETW -// or a profiler. The implementations of these two functions are in profheapwalkhelper.cpp. 
-#if defined(FEATURE_EVENT_TRACE) | defined(GC_PROFILING) -void ScanRootsHelper(Object* pObj, Object** ppRoot, ScanContext * pSC, DWORD dwFlags); -BOOL HeapWalkHelper(Object * pBO, void * pvContext); -#endif - #endif // _GCSCAN_H_ diff --git a/src/coreclr/src/gc/objecthandle.cpp b/src/coreclr/src/gc/objecthandle.cpp index e8eed93..5f5ecbf 100644 --- a/src/coreclr/src/gc/objecthandle.cpp +++ b/src/coreclr/src/gc/objecthandle.cpp @@ -441,13 +441,13 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt ScanContext *pSC = (ScanContext *)lp1; uint32_t rootFlags = 0; - BOOL isDependent = FALSE; + bool isDependent = false; OBJECTHANDLE handle = (OBJECTHANDLE)(pRef); switch (HandleFetchType(handle)) { case HNDTYPE_DEPENDENT: - isDependent = TRUE; + isDependent = true; break; case HNDTYPE_WEAK_SHORT: case HNDTYPE_WEAK_LONG: diff --git a/src/coreclr/src/gc/objecthandle.h b/src/coreclr/src/gc/objecthandle.h index 9c885bb..b86572b 100644 --- a/src/coreclr/src/gc/objecthandle.h +++ b/src/coreclr/src/gc/objecthandle.h @@ -511,8 +511,6 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn); #endif -typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent); - void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1); void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1); void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn); diff --git a/src/coreclr/src/gc/sample/gcenv.ee.cpp b/src/coreclr/src/gc/sample/gcenv.ee.cpp index b339fcc..07be244 100644 --- a/src/coreclr/src/gc/sample/gcenv.ee.cpp +++ b/src/coreclr/src/gc/sample/gcenv.ee.cpp @@ -137,7 +137,7 @@ void ThreadStore::AttachCurrentThread() void GCToEEInterface::SuspendEE(SUSPEND_REASON reason) { - g_theGCHeap->SetGCInProgress(TRUE); + g_theGCHeap->SetGCInProgress(true); // TODO: Implement } @@ -146,7 +146,7 @@ void GCToEEInterface::RestartEE(bool bFinishedGC) { // TODO: Implement - g_theGCHeap->SetGCInProgress(FALSE); + g_theGCHeap->SetGCInProgress(false); } void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc) diff --git a/src/coreclr/src/inc/profilepriv.h b/src/coreclr/src/inc/profilepriv.h index f74818c..ae7b225 100644 --- a/src/coreclr/src/inc/profilepriv.h +++ b/src/coreclr/src/inc/profilepriv.h @@ -148,9 +148,9 @@ GVAL_DECL(ProfControlBlock, g_profControlBlock); #endif // defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPORTED) // This is the helper callback that the gc uses when walking the heap. 
-BOOL HeapWalkHelper(Object* pBO, void* pv); +bool HeapWalkHelper(Object* pBO, void* pv); void ScanRootsHelper(Object* pObj, Object** ppRoot, ScanContext *pSC, uint32_t dwUnused); -BOOL AllocByClassHelper(Object* pBO, void* pv); +bool AllocByClassHelper(Object* pBO, void* pv); #endif // _ProfilePriv_h_ diff --git a/src/coreclr/src/vm/commemoryfailpoint.cpp b/src/coreclr/src/vm/commemoryfailpoint.cpp index 4d1ed6e..2900409 100644 --- a/src/coreclr/src/vm/commemoryfailpoint.cpp +++ b/src/coreclr/src/vm/commemoryfailpoint.cpp @@ -27,8 +27,8 @@ FCIMPL2(void, COMMemoryFailPoint::GetMemorySettings, UINT64* pMaxGCSegmentSize, FCALL_CONTRACT; IGCHeap * pGC = GCHeapUtilities::GetGCHeap(); - size_t segment_size = pGC->GetValidSegmentSize(FALSE); - size_t large_segment_size = pGC->GetValidSegmentSize(TRUE); + size_t segment_size = pGC->GetValidSegmentSize(false); + size_t large_segment_size = pGC->GetValidSegmentSize(true); _ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX); if (segment_size > large_segment_size) *pMaxGCSegmentSize = (UINT64) segment_size; diff --git a/src/coreclr/src/vm/comutilnative.cpp b/src/coreclr/src/vm/comutilnative.cpp index 10d0225..b75f684 100644 --- a/src/coreclr/src/vm/comutilnative.cpp +++ b/src/coreclr/src/vm/comutilnative.cpp @@ -1777,9 +1777,9 @@ int QCALLTYPE GCInterface::StartNoGCRegion(INT64 totalSize, BOOL lohSizeKnown, I GCX_COOP(); retVal = GCHeapUtilities::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize, - lohSizeKnown, + !!lohSizeKnown, (ULONGLONG)lohSize, - disallowFullBlockingGC); + !!disallowFullBlockingGC); END_QCALL; @@ -1868,7 +1868,7 @@ void QCALLTYPE GCInterface::Collect(INT32 generation, INT32 mode) //We don't need to check the top end because the GC will take care of that. GCX_COOP(); - GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, FALSE, mode); + GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, mode); END_QCALL; } @@ -2331,7 +2331,7 @@ NOINLINE void GCInterface::GarbageCollectModeAny(int generation) CONTRACTL_END; GCX_COOP(); - GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, FALSE, collection_non_blocking); + GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, collection_non_blocking); } // diff --git a/src/coreclr/src/vm/eventtrace.cpp b/src/coreclr/src/vm/eventtrace.cpp index 2b45bf5..b708f17 100644 --- a/src/coreclr/src/vm/eventtrace.cpp +++ b/src/coreclr/src/vm/eventtrace.cpp @@ -439,8 +439,8 @@ VOID ETW::GCLog::GCSettingsEvent() ETW::GCLog::ETW_GC_INFO Info; Info.GCSettings.ServerGC = GCHeapUtilities::IsServerHeap (); - Info.GCSettings.SegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (FALSE); - Info.GCSettings.LargeObjectSegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (TRUE); + Info.GCSettings.SegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (false); + Info.GCSettings.LargeObjectSegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (true); FireEtwGCSettings_V1(Info.GCSettings.SegmentSize, Info.GCSettings.LargeObjectSegmentSize, Info.GCSettings.ServerGC, GetClrInstanceId()); } GCHeapUtilities::GetGCHeap()->DiagTraceGCSegments(); @@ -1035,7 +1035,7 @@ HRESULT ETW::GCLog::ForceGCForDiagnostics() hr = GCHeapUtilities::GetGCHeap()->GarbageCollect( -1, // all generations should be collected - FALSE, // low_memory_p + false, // low_memory_p collection_blocking); #ifndef FEATURE_REDHAWK diff --git a/src/coreclr/src/vm/finalizerthread.cpp b/src/coreclr/src/vm/finalizerthread.cpp index d111eeb..0a4da16 100644 --- 
a/src/coreclr/src/vm/finalizerthread.cpp +++ b/src/coreclr/src/vm/finalizerthread.cpp @@ -484,7 +484,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event) case (WAIT_OBJECT_0 + kLowMemoryNotification): //short on memory GC immediately GetFinalizerThread()->DisablePreemptiveGC(); - GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE); + GCHeapUtilities::GetGCHeap()->GarbageCollect(0, true); GetFinalizerThread()->EnablePreemptiveGC(); //wait only on the event for 2s switch (event->Wait(2000, FALSE)) @@ -575,7 +575,7 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args) { s_forcedGCInProgress = true; GetFinalizerThread()->DisablePreemptiveGC(); - GCHeapUtilities::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking); + GCHeapUtilities::GetGCHeap()->GarbageCollect(2, false, collection_blocking); GetFinalizerThread()->EnablePreemptiveGC(); s_forcedGCInProgress = false; @@ -645,7 +645,7 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args) } else if (UnloadingAppDomain == NULL) break; - else if (!GCHeapUtilities::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload)) + else if (!GCHeapUtilities::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, !!fRunFinalizersOnUnload)) { break; } diff --git a/src/coreclr/src/vm/gcenv.ee.cpp b/src/coreclr/src/vm/gcenv.ee.cpp index 392c179..e861995 100644 --- a/src/coreclr/src/vm/gcenv.ee.cpp +++ b/src/coreclr/src/vm/gcenv.ee.cpp @@ -955,7 +955,7 @@ void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, } } -void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent) +void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent) { ProfilingScanContext* pSC = (ProfilingScanContext*)context; @@ -1068,7 +1068,7 @@ void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForE // **** Walk objects on heap: only if profiling API wants them or ETW wants them. if (fProfilerPinned || fShouldWalkHeapObjectsForEtw) { - GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */); + GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */); } #ifdef FEATURE_EVENT_TRACE @@ -1118,7 +1118,7 @@ void GCProfileWalkHeap() #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) } -void WalkFReachableObjects(BOOL isCritical, void* objectID) +void WalkFReachableObjects(bool isCritical, void* objectID) { g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID); } @@ -1136,7 +1136,7 @@ void GCToEEInterface::DiagGCStart(int gen, bool isInduced) // When we're walking objects allocated by class, then we don't want to walk the large // object heap because then it would count things that may have been around for a while. - GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, FALSE); + GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false); // Notify that we've reached the end of the Gen 0 scan g_profControlBlock.pProfInterface->EndAllocByClass(&context); @@ -1183,13 +1183,13 @@ void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext) // don't get confused. 
void WalkMovedReferences(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, - size_t context, - BOOL fCompacting, - BOOL fBGC) + void* context, + bool fCompacting, + bool fBGC) { ETW::GCLog::MovedReference(begin, end, (fCompacting ? reloc : 0), - context, + (size_t)context, fCompacting, !fBGC); } @@ -1201,7 +1201,7 @@ void GCToEEInterface::DiagWalkSurvivors(void* gcContext) { size_t context = 0; ETW::GCLog::BeginMovedReferences(&context); - GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_gc); + GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc); ETW::GCLog::EndMovedReferences(context); } #endif //GC_PROFILING || FEATURE_EVENT_TRACE @@ -1214,7 +1214,7 @@ void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext) { size_t context = 0; ETW::GCLog::BeginMovedReferences(&context); - GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_loh); + GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh); ETW::GCLog::EndMovedReferences(context); } #endif //GC_PROFILING || FEATURE_EVENT_TRACE @@ -1227,7 +1227,7 @@ void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext) { size_t context = 0; ETW::GCLog::BeginMovedReferences(&context); - GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_bgc); + GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc); ETW::GCLog::EndMovedReferences(context); } #endif //GC_PROFILING || FEATURE_EVENT_TRACE diff --git a/src/coreclr/src/vm/gcheaputilities.h b/src/coreclr/src/vm/gcheaputilities.h index d5ce46c..0680763 100644 --- a/src/coreclr/src/vm/gcheaputilities.h +++ b/src/coreclr/src/vm/gcheaputilities.h @@ -81,7 +81,7 @@ public: // Returns true if a the heap is initialized and a garbage collection // is in progress, false otherwise. - inline static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE) + inline static bool IsGCInProgress(bool bConsiderGCStart = false) { WRAPPER_NO_CONTRACT; @@ -90,7 +90,7 @@ public: // Returns true if we should be competing marking for statics. This // influences the behavior of `GCToEEInterface::GcScanRoots`. - inline static BOOL MarkShouldCompeteForStatics() + inline static bool MarkShouldCompeteForStatics() { WRAPPER_NO_CONTRACT; @@ -98,7 +98,7 @@ public: } // Waits until a GC is complete, if the heap has been initialized. - inline static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE) + inline static void WaitForGCCompletion(bool bConsiderGCStart = false) { WRAPPER_NO_CONTRACT; diff --git a/src/coreclr/src/vm/gcscan.h b/src/coreclr/src/vm/gcscan.h deleted file mode 100644 index aba1e6b..0000000 --- a/src/coreclr/src/vm/gcscan.h +++ /dev/null @@ -1,5 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. 
- -#include "../gc/gcscan.h" diff --git a/src/coreclr/src/vm/object.cpp b/src/coreclr/src/vm/object.cpp index c37aacc..1725ef7 100644 --- a/src/coreclr/src/vm/object.cpp +++ b/src/coreclr/src/vm/object.cpp @@ -1726,7 +1726,7 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE; if (!noRangeChecks) { - bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, TRUE); + bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, true); if (!bSmallObjectHeapPtr) bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this); diff --git a/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp b/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp index 40f0864..cfd99ad 100644 --- a/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp +++ b/src/coreclr/src/vm/proftoeeinterfaceimpl.cpp @@ -1022,7 +1022,7 @@ ClassID SafeGetClassIDFromObject(Object * pObj) //--------------------------------------------------------------------------------------- // -// Callback of type walk_fn used by GCHeapUtilities::DiagWalkObject. Keeps a count of each +// Callback of type walk_fn used by IGCHeap::DiagWalkObject. Keeps a count of each // object reference found. // // Arguments: @@ -1033,7 +1033,7 @@ ClassID SafeGetClassIDFromObject(Object * pObj) // Always returns TRUE to object walker so it walks the entire object // -BOOL CountContainedObjectRef(Object * pBO, void * context) +bool CountContainedObjectRef(Object * pBO, void * context) { LIMITED_METHOD_CONTRACT; // Increase the count @@ -1044,7 +1044,7 @@ BOOL CountContainedObjectRef(Object * pBO, void * context) //--------------------------------------------------------------------------------------- // -// Callback of type walk_fn used by GCHeapUtilities::DiagWalkObject. Stores each object reference +// Callback of type walk_fn used by IGCHeap::DiagWalkObject. Stores each object reference // encountered into an array. 
// // Arguments: @@ -1058,7 +1058,7 @@ BOOL CountContainedObjectRef(Object * pBO, void * context) // Always returns TRUE to object walker so it walks the entire object // -BOOL SaveContainedObjectRef(Object * pBO, void * context) +bool SaveContainedObjectRef(Object * pBO, void * context) { LIMITED_METHOD_CONTRACT; // Assign the value @@ -1096,7 +1096,7 @@ BOOL SaveContainedObjectRef(Object * pBO, void * context) // extern bool s_forcedGCInProgress; -BOOL HeapWalkHelper(Object * pBO, void * pvContext) +bool HeapWalkHelper(Object * pBO, void * pvContext) { CONTRACTL { @@ -1221,7 +1221,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext) // Currently always returns TRUE // -BOOL AllocByClassHelper(Object * pBO, void * pv) +bool AllocByClassHelper(Object * pBO, void * pv) { CONTRACTL { diff --git a/src/coreclr/src/vm/rcwwalker.cpp b/src/coreclr/src/vm/rcwwalker.cpp index 0b87536..7ecf2b6 100644 --- a/src/coreclr/src/vm/rcwwalker.cpp +++ b/src/coreclr/src/vm/rcwwalker.cpp @@ -129,7 +129,7 @@ STDMETHODIMP CLRServicesImpl::GarbageCollect(DWORD dwFlags) { GCX_COOP_THREAD_EXISTS(GET_THREAD()); if (dwFlags & GC_FOR_APPX_SUSPEND) { - GCHeapUtilities::GetGCHeap()->GarbageCollect(2, TRUE, collection_blocking | collection_optimized); + GCHeapUtilities::GetGCHeap()->GarbageCollect(2, true, collection_blocking | collection_optimized); } else GCHeapUtilities::GetGCHeap()->GarbageCollect(); diff --git a/src/coreclr/src/vm/threads.cpp b/src/coreclr/src/vm/threads.cpp index 6d9d10b..da805bd 100644 --- a/src/coreclr/src/vm/threads.cpp +++ b/src/coreclr/src/vm/threads.cpp @@ -3400,7 +3400,7 @@ void Thread::OnThreadTerminate(BOOL holdingLock) if (ThisThreadID == CurrentThreadID) { GCX_COOP(); - GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL); + GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL); m_alloc_context.init(); } } @@ -3457,7 +3457,7 @@ void Thread::OnThreadTerminate(BOOL holdingLock) { // We must be holding the ThreadStore lock in order to clean up alloc context. // We should never call FixAllocContext during GC. - GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL); + GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL); m_alloc_context.init(); } diff --git a/src/coreclr/src/vm/threadsuspend.cpp b/src/coreclr/src/vm/threadsuspend.cpp index 71409ce..ab1f2bbf 100644 --- a/src/coreclr/src/vm/threadsuspend.cpp +++ b/src/coreclr/src/vm/threadsuspend.cpp @@ -7164,7 +7164,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded) // Revert to being a normal thread // ClrFlsClearThreadType (ThreadType_DynamicSuspendEE); - GCHeapUtilities::GetGCHeap()->SetGCInProgress(FALSE); + GCHeapUtilities::GetGCHeap()->SetGCInProgress(false); // // Allow threads to enter COOP mode (though we still need to wake the ones @@ -7332,7 +7332,7 @@ retry_for_debugger: // It seems like much of the above is redundant. We should investigate reducing the number // of mechanisms we use to indicate that a suspension is in progress. // - GCHeapUtilities::GetGCHeap()->SetGCInProgress(TRUE); + GCHeapUtilities::GetGCHeap()->SetGCInProgress(true); // // Gratuitous memory barrier. (may be needed - but I'm not sure why.)
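
The pattern used throughout this patch is worth spelling out. The legacy Win32-style BOOL is an int typedef, so handing a BOOL expression to one of the new bool parameters can trigger implicit-conversion warnings; the commit silences them by collapsing the value with !! at the call site (for example !!settings.compaction). The diagnostic context now travels through the interface as an opaque void* that the callback casts back itself. The sketch below is illustrative only and not part of the patch; Settings, walk_callback and report_moved are invented stand-ins for the CoreCLR types, loosely modelled on record_surv_fn.

    #include <cstdint>
    #include <cstdio>

    typedef int BOOL;   // Win32-style BOOL: an int, where any non-zero value means "true"

    // Shape of the callback after the change: opaque void* context, real C++ bools for the flags.
    typedef void (*walk_callback)(uint8_t* begin, uint8_t* end,
                                  void* context, bool compacting_p, bool bgc_p);

    struct Settings
    {
        BOOL compaction;    // legacy field that may hold any non-zero value
    };

    void report_moved(uint8_t* begin, uint8_t* end, void* context, bool compacting_p, bool bgc_p)
    {
        // A real callback would raise an ETW/profiler event; printing is enough for the sketch.
        std::printf("range %p-%p compacting=%d bgc=%d context=%p\n",
                    (void*)begin, (void*)end, (int)compacting_p, (int)bgc_p, context);
    }

    int main()
    {
        Settings settings = { 2 };      // non-zero BOOL that is not literally TRUE (1)
        uint8_t heap[16] = {};
        int profiler_state = 0;         // stand-in for the profiler's own bookkeeping

        // '!!' folds any non-zero BOOL to exactly true or false, so no conversion warning fires,
        // and the context pointer is passed through untouched instead of being squeezed into a size_t.
        walk_callback fn = &report_moved;
        fn(heap, heap + sizeof(heap), &profiler_state, !!settings.compaction, false);
        return 0;
    }

Keeping the context parameter as void* rather than size_t is what lets the ETW walker in gcenv.ee.cpp simply cast its size_t cookie to void* at the interface boundary and back again inside WalkMovedReferences, as the gcinterface.h, gcpriv.h and gcenv.ee.cpp hunks above show.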