#define FOH_COMMIT_SIZE (64 * 1024)
FrozenObjectHeapManager::FrozenObjectHeapManager():
- m_Crst(CrstFrozenObjectHeap, CRST_UNSAFE_COOPGC),
+ m_Crst(CrstFrozenObjectHeap, CRST_UNSAFE_ANYMODE),
+ m_SegmentRegistrationCrst(CrstFrozenObjectHeap),
m_CurrentSegment(nullptr)
{
}
// Allocates an object of the give size (including header) on a frozen segment.
// May return nullptr if object is too large (larger than FOH_COMMIT_SIZE)
// in such cases caller is responsible to find a more appropriate heap to allocate it
-Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t objectSize, bool publish)
+
+Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t objectSize,
+ void(*initFunc)(Object*, void*), void* pParam)
{
CONTRACTL
{
#else // FEATURE_BASICFREEZE
Object* obj = nullptr;
- {
- CrstHolder ch(&m_Crst);
-
- _ASSERT(type != nullptr);
- _ASSERT(FOH_COMMIT_SIZE >= MIN_OBJECT_SIZE);
-
- // Currently we don't support frozen objects with special alignment requirements
- // TODO: We should also give up on arrays of doubles on 32-bit platforms.
- // (we currently never allocate them on frozen segments)
- #ifdef FEATURE_64BIT_ALIGNMENT
- if (type->RequiresAlign8())
- {
- // Align8 objects are not supported yet
- return nullptr;
- }
- #endif
+ FrozenObjectSegment* curSeg = nullptr;
+ uint8_t* curSegmentCurrent = nullptr;
+ size_t curSegSizeCommitted = 0;
- // NOTE: objectSize is expected be the full size including header
- _ASSERT(objectSize >= MIN_OBJECT_SIZE);
-
- if (objectSize > FOH_COMMIT_SIZE)
+ {
+ GCX_PREEMP();
{
- // The current design doesn't allow objects larger than FOH_COMMIT_SIZE and
- // since FrozenObjectHeap is just an optimization, let's not fill it with huge objects.
- return nullptr;
- }
-
- if (m_CurrentSegment == nullptr)
+ CrstHolder ch(&m_Crst);
+
+ _ASSERT(type != nullptr);
+ _ASSERT(FOH_COMMIT_SIZE >= MIN_OBJECT_SIZE);
+
+ // Currently we don't support frozen objects with special alignment requirements
+ // TODO: We should also give up on arrays of doubles on 32-bit platforms.
+ // (we currently never allocate them on frozen segments)
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (type->RequiresAlign8())
+ {
+ // Align8 objects are not supported yet
+ return nullptr;
+ }
+#endif
+
+ // NOTE: objectSize is expected to be the full size including header
+ _ASSERT(objectSize >= MIN_OBJECT_SIZE);
+
+ if (objectSize > FOH_COMMIT_SIZE)
+ {
+ // The current design doesn't allow objects larger than FOH_COMMIT_SIZE and
+ // since FrozenObjectHeap is just an optimization, let's not fill it with huge objects.
+ return nullptr;
+ }
+
+ obj = m_CurrentSegment == nullptr ? nullptr : m_CurrentSegment->TryAllocateObject(type, objectSize);
+ // obj is nullptr if the current segment is full or hasn't been allocated yet
+ if (obj == nullptr)
+ {
+ size_t newSegmentSize = FOH_SEGMENT_DEFAULT_SIZE;
+ if (m_CurrentSegment != nullptr)
+ {
+ // Double the reserved size to reduce the number of frozen segments in apps with lots of frozen objects
+ // Use the same size in case the prevSegmentSize*2 operation overflows.
+ const size_t prevSegmentSize = m_CurrentSegment->m_Size;
+ newSegmentSize = max(prevSegmentSize, prevSegmentSize * 2);
+ }
+
+ m_CurrentSegment = new FrozenObjectSegment(newSegmentSize);
+ m_FrozenSegments.Append(m_CurrentSegment);
+
+ // Try again
+ obj = m_CurrentSegment->TryAllocateObject(type, objectSize);
+
+ // This time it's not expected to be null
+ _ASSERT(obj != nullptr);
+ }
+
+ if (initFunc != nullptr)
+ {
+ initFunc(obj, pParam);
+ }
+
+ curSeg = m_CurrentSegment;
+ curSegSizeCommitted = curSeg->m_SizeCommitted;
+ curSegmentCurrent = curSeg->m_pCurrent;
+ } // end of m_Crst lock
+
+ // Let GC know about the new segment or changes in it.
+ // We do it under a new lock because the main one (m_Crst) can be used by Profiler in a GC's thread
+ // and that might cause deadlocks since RegisterFrozenSegment may get stuck on the GC's lock.
{
- // Create the first segment on first allocation
- m_CurrentSegment = new FrozenObjectSegment(FOH_SEGMENT_DEFAULT_SIZE);
- m_FrozenSegments.Append(m_CurrentSegment);
- _ASSERT(m_CurrentSegment != nullptr);
+ CrstHolder regLock(&m_SegmentRegistrationCrst);
+ curSeg->RegisterOrUpdate(curSegmentCurrent, curSegSizeCommitted);
}
- obj = m_CurrentSegment->TryAllocateObject(type, objectSize);
-
- // The only case where it can be null is when the current segment is full and we need
- // to create a new one
- if (obj == nullptr)
- {
- // Double the reserved size to reduce the number of frozen segments in apps with lots of frozen objects
- // Use the same size in case if prevSegmentSize*2 operation overflows.
- size_t prevSegmentSize = m_CurrentSegment->GetSize();
- m_CurrentSegment = new FrozenObjectSegment(max(prevSegmentSize, prevSegmentSize * 2));
- m_FrozenSegments.Append(m_CurrentSegment);
-
- // Try again
- obj = m_CurrentSegment->TryAllocateObject(type, objectSize);
+ } // end of GCX_PREEMP
- // This time it's not expected to be null
- _ASSERT(obj != nullptr);
- }
- }
- if (publish)
- {
- PublishFrozenObject(obj);
- }
+ PublishFrozenObject(obj);
return obj;
#endif // !FEATURE_BASICFREEZE
FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
m_pStart(nullptr),
m_pCurrent(nullptr),
+ m_pCurrentRegistered(nullptr),
m_SizeCommitted(0),
m_Size(sizeHint),
m_SegmentHandle(nullptr)
- COMMA_INDEBUG(m_ObjectsCount(0))
{
_ASSERT(m_Size > FOH_COMMIT_SIZE);
_ASSERT(m_Size % FOH_COMMIT_SIZE == 0);
ThrowOutOfMemory();
}
+ m_pStart = static_cast<uint8_t*>(committedAlloc);
+ m_pCurrent = m_pStart + sizeof(ObjHeader);
+ m_SizeCommitted = FOH_COMMIT_SIZE;
+
// ClrVirtualAlloc is expected to be PageSize-aligned so we can expect
// DATA_ALIGNMENT alignment as well
_ASSERT(IS_ALIGNED(committedAlloc, DATA_ALIGNMENT));
+}
- segment_info si;
- si.pvMem = committedAlloc;
- si.ibFirstObject = sizeof(ObjHeader);
- si.ibAllocated = si.ibFirstObject;
- si.ibCommit = FOH_COMMIT_SIZE;
- si.ibReserved = m_Size;
-
- m_SegmentHandle = GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&si);
- if (m_SegmentHandle == nullptr)
+void FrozenObjectSegment::RegisterOrUpdate(uint8_t* current, size_t sizeCommited)
+{
+ CONTRACTL
{
- ClrVirtualFree(alloc, 0, MEM_RELEASE);
- ThrowOutOfMemory();
+ THROWS;
+ MODE_PREEMPTIVE;
}
+ CONTRACTL_END
- m_pStart = static_cast<uint8_t*>(committedAlloc);
- m_pCurrent = m_pStart + sizeof(ObjHeader);
- m_SizeCommitted = si.ibCommit;
- INDEBUG(m_ObjectsCount = 0);
- return;
+ if (m_pCurrentRegistered == nullptr)
+ {
+ segment_info si;
+ si.pvMem = m_pStart;
+ si.ibFirstObject = sizeof(ObjHeader);
+ si.ibAllocated = (size_t)current;
+ si.ibCommit = sizeCommited;
+ si.ibReserved = m_Size;
+
+ // NOTE: RegisterFrozenSegment may take a GC lock inside.
+ m_SegmentHandle = GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&si);
+ if (m_SegmentHandle == nullptr)
+ {
+ ThrowOutOfMemory();
+ }
+ m_pCurrentRegistered = current;
+ }
+ else
+ {
+ if (current > m_pCurrentRegistered)
+ {
+ GCHeapUtilities::GetGCHeap()->UpdateFrozenSegment(
+ m_SegmentHandle, current, m_pStart + sizeCommited);
+ m_pCurrentRegistered = current;
+ }
+ else
+ {
+ // Some other thread already advanced it.
+ }
+ }
}
Object* FrozenObjectSegment::TryAllocateObject(PTR_MethodTable type, size_t objectSize)
{
- _ASSERT(m_pStart != nullptr && m_Size > 0 && m_SegmentHandle != nullptr); // Expected to be inited
+ _ASSERT((m_pStart != nullptr) && (m_Size > 0));
_ASSERT(IS_ALIGNED(m_pCurrent, DATA_ALIGNMENT));
_ASSERT(IS_ALIGNED(objectSize, DATA_ALIGNMENT));
_ASSERT(objectSize <= FOH_COMMIT_SIZE);
m_SizeCommitted += FOH_COMMIT_SIZE;
}
- INDEBUG(m_ObjectsCount++);
-
Object* object = reinterpret_cast<Object*>(m_pCurrent);
object->SetMethodTable(type);
m_pCurrent += objectSize;
- // Notify GC that we bumped the pointer and, probably, committed more memory in the reserved part
- GCHeapUtilities::GetGCHeap()->UpdateFrozenSegment(m_SegmentHandle, m_pCurrent, m_pStart + m_SizeCommitted);
-
return object;
}
{
public:
FrozenObjectHeapManager();
- Object* TryAllocateObject(PTR_MethodTable type, size_t objectSize, bool publish = true);
+ Object* TryAllocateObject(PTR_MethodTable type, size_t objectSize,
+ void(*initFunc)(Object*,void*) = nullptr, void* pParam = nullptr);
private:
Crst m_Crst;
+ Crst m_SegmentRegistrationCrst;
SArray<FrozenObjectSegment*> m_FrozenSegments;
FrozenObjectSegment* m_CurrentSegment;
public:
FrozenObjectSegment(size_t sizeHint);
Object* TryAllocateObject(PTR_MethodTable type, size_t objectSize);
- size_t GetSize() const
- {
- return m_Size;
- }
+ void RegisterOrUpdate(uint8_t* current, size_t sizeCommited);
private:
Object* GetFirstObject() const;
// Start of the reserved memory, the first object starts at "m_pStart + sizeof(ObjHeader)" (its pMT)
uint8_t* m_pStart;
+ // NOTE: To handle potential race conditions, only m_[x]Registered fields should be accessed
+ // externally as they guarantee that GC is aware of the current state of the segment.
+
// Pointer to the end of the current segment, ready to be used as a pMT for a new object
// meaning that "m_pCurrent - sizeof(ObjHeader)" is the actual start of the new object (header).
//
// m_pCurrent <= m_SizeCommitted
uint8_t* m_pCurrent;
+ // Last known value of m_pCurrent that GC is aware of.
+ //
+ // m_pCurrentRegistered <= m_pCurrent
+ uint8_t* m_pCurrentRegistered;
+
// Memory committed in the current segment
//
// m_SizeCommitted <= m_pStart + FOH_SIZE_RESERVED
size_t m_Size;
segment_handle m_SegmentHandle;
- INDEBUG(size_t m_ObjectsCount);
friend class ProfilerObjectEnum;
friend class ProfToEEInterfaceImpl;
+ friend class FrozenObjectHeapManager;
};
#endif // _FROZENOBJECTHEAP_H
#endif
FrozenObjectHeapManager* foh = SystemDomain::GetFrozenObjectHeapManager();
- ArrayBase* orArray = static_cast<ArrayBase*>(foh->TryAllocateObject(pArrayMT, PtrAlign(totalSize), /*publish*/ false));
+ ArrayBase* orArray = static_cast<ArrayBase*>(
+ foh->TryAllocateObject(pArrayMT, PtrAlign(totalSize), [](Object* obj, void* elemCntPtr){
+ // Initialize newly allocated object before publish
+ static_cast<ArrayBase*>(obj)->m_NumComponents = *static_cast<DWORD*>(elemCntPtr);
+ }, &cElements));
+
if (orArray == nullptr)
{
// We failed to allocate on a frozen segment, fallback to AllocateSzArray
// E.g. if the array is too big to fit on a frozen segment
return NULL;
}
- orArray->m_NumComponents = cElements;
-
- // Publish needs to be postponed in this case because we need to specify array length
- PublishObjectAndNotify(orArray, GC_ALLOC_NO_FLAGS);
-
return ObjectToOBJECTREF(orArray);
}
if (preferFrozenHeap)
{
FrozenObjectHeapManager* foh = SystemDomain::GetFrozenObjectHeapManager();
- orString = static_cast<StringObject*>(foh->TryAllocateObject(g_pStringClass, totalSize, /* publish = */false));
+
+ orString = static_cast<StringObject*>(foh->TryAllocateObject(
+ g_pStringClass, totalSize, [](Object* obj, void* pStrLen) {
+ // Initialize newly allocated object before publish
+ static_cast<StringObject*>(obj)->SetStringLength(*static_cast<DWORD*>(pStrLen));
+ }, &cchStringLength));
+
if (orString != nullptr)
{
- orString->SetStringLength(cchStringLength);
- // Publish needs to be postponed in this case because we need to specify string length
- PublishObjectAndNotify(orString, GC_ALLOC_NO_FLAGS);
_ASSERTE(orString->GetBuffer()[cchStringLength] == W('\0'));
orStringRef = ObjectToSTRINGREF(orString);
*pIsFrozen = true;
#endif // FEATURE_64BIT_ALIGNMENT
FrozenObjectHeapManager* foh = SystemDomain::GetFrozenObjectHeapManager();
- Object* orObject = foh->TryAllocateObject(pObjMT, PtrAlign(pObjMT->GetBaseSize()), /*publish*/ true);
+ Object* orObject = foh->TryAllocateObject(pObjMT, PtrAlign(pObjMT->GetBaseSize()));
return ObjectToOBJECTREF(orObject);
}