if (pThis == NULL)
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
- Thread *pThread = GetThread();
-
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
AssemblySpec spec;
- spec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF*) &pThis, FALSE);
+ spec.InitializeSpec(pStackingAllocator, (ASSEMBLYNAMEREF*) &pThis, FALSE);
StackSString name;
spec.GetFileOrDisplayName(ASM_DISPLAYF_VERSION |
if (pThis == NULL)
COMPlusThrow(kNullReferenceException, W("NullReference_This"));
- Thread * pThread = GetThread();
-
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
AssemblySpec spec;
- hr = spec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF *) &pThis, TRUE);
+ hr = spec.InitializeSpec(pStackingAllocator, (ASSEMBLYNAMEREF *) &pThis, TRUE);
if (SUCCEEDED(hr))
{
if (gc.assemblyName == NULL)
COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
- Thread * pThread = GetThread();
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
DomainAssembly * pParentAssembly = NULL;
Assembly * pRefAssembly = NULL;
// Initialize spec
AssemblySpec spec;
- spec.InitializeSpec(&(pThread->m_MarshalAlloc),
+ spec.InitializeSpec(pStackingAllocator,
&gc.assemblyName,
FALSE);
}
if (gc.codeBase != NULL)
- spec.SetCodeBase(&(pThread->m_MarshalAlloc), &gc.codeBase);
+ spec.SetCodeBase(pStackingAllocator, &gc.codeBase);
if (pParentAssembly != NULL)
spec.SetParentAssembly(pParentAssembly);
cVtableThunks += pFixupTable[iFixup].Count;
}
- Thread *pThread = GetThread();
- StackingAllocator *pAlloc = &pThread->m_MarshalAlloc;
- CheckPointHolder cph(pAlloc->GetCheckpoint());
+ ACQUIRE_STACKING_ALLOCATOR(pAlloc);
// Allocate the working array of tokens.
cMethodsToLoad = cVtableThunks;
// MethodTableBuilder uses the stacking allocator for most of it's
// working memory requirements, so this makes sure to free the memory
// once this function is out of scope.
- CheckPointHolder cph(GetThread()->m_MarshalAlloc.GetCheckpoint());
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
MethodTableBuilder::bmtMetaDataInfo bmtMetaData;
bmtMetaData.cFields = 1;
BaseDomain * pDomain = pMT->GetDomain();
MethodTableBuilder builder(pMT, pClass,
- &GetThread()->m_MarshalAlloc,
+ pStackingAllocator,
&dummyAmTracker);
MethodTableBuilder::bmtGenericsInfo genericsInfo;
// Get the new MethodDesc (Note: The method desc memory is zero initialized)
MethodDesc *pNewMD = pChunk->GetFirstMethodDesc();
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
+
// Initialize the new MethodDesc
MethodTableBuilder builder(pMT,
pClass,
- &GetThread()->m_MarshalAlloc,
+ pStackingAllocator,
&dummyAmTracker);
EX_TRY
{
#endif // _DEBUG
//Get Check Point for the thread-based allocator
- Thread *pThread = GetThread();
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
HRESULT hr = S_OK;
Module *pModule = pThisMT->GetModule();
// The interop data for the VTable for COM Interop backward compatibility
// Allocate space to hold on to the MethodDesc for each entry
- bmtVT.ppSDVtable = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotData*[bmtVT.dwMaxVtableSize];
+ bmtVT.ppSDVtable = new (GetStackingAllocator()) InteropMethodTableSlotData*[bmtVT.dwMaxVtableSize];
ZeroMemory(bmtVT.ppSDVtable, bmtVT.dwMaxVtableSize * sizeof(InteropMethodTableSlotData*));
// Allocate space to hold on to the MethodDesc for each entry
- bmtVT.ppSDNonVtable = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotData*[NumDeclaredMethods()];
+ bmtVT.ppSDNonVtable = new (GetStackingAllocator()) InteropMethodTableSlotData*[NumDeclaredMethods()];
ZeroMemory(bmtVT.ppSDNonVtable , sizeof(InteropMethodTableSlotData*)*NumDeclaredMethods());
DWORD cMaxEntries = (bmtVT.dwMaxVtableSize * 2) + (NumDeclaredMethods() * 2);
- InteropMethodTableSlotData *pInteropData = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotData[cMaxEntries];
+ InteropMethodTableSlotData *pInteropData = new (GetStackingAllocator()) InteropMethodTableSlotData[cMaxEntries];
memset(pInteropData, 0, cMaxEntries * sizeof(InteropMethodTableSlotData));
- bmtVT.pInteropData = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotDataMap(pInteropData, cMaxEntries);
+ bmtVT.pInteropData = new (GetStackingAllocator()) InteropMethodTableSlotDataMap(pInteropData, cMaxEntries);
// Initialize the map with parent information
if (bmtParent.pParentMethodTable != NULL)
Module *pModule = GetModule();
// Allocate the BuildingInterfaceList table
- *ppBuildingInterfaceList = new(&pThread->m_MarshalAlloc) BuildingInterfaceInfo_t[cAllInterfaces];
+ *ppBuildingInterfaceList = new(GetStackingAllocator()) BuildingInterfaceInfo_t[cAllInterfaces];
BuildingInterfaceInfo_t *pInterfaceBuildInfo = *ppBuildingInterfaceList;
while (pMDImport->EnumNext(&hEnumInterfaceImpl, &ii))
slotNum = (WORD) pItfMD->GetSlot();
if (bmtInterface->pppInterfaceImplementingMD[j] == NULL)
{
- Thread *pThread = GetThread();
- StackingAllocator * pAlloc = &pThread->m_MarshalAlloc;
-
- bmtInterface->pppInterfaceImplementingMD[j] = new (pAlloc) MethodDesc * [pInterface->GetNumVirtuals()];
+ bmtInterface->pppInterfaceImplementingMD[j] = new (GetStackingAllocator()) MethodDesc * [pInterface->GetNumVirtuals()];
memset(bmtInterface->pppInterfaceImplementingMD[j], 0, sizeof(MethodDesc *) * pInterface->GetNumVirtuals());
- bmtInterface->pppInterfaceDeclaringMD[j] = new (pAlloc) MethodDesc * [pInterface->GetNumVirtuals()];
+ bmtInterface->pppInterfaceDeclaringMD[j] = new (GetStackingAllocator()) MethodDesc * [pInterface->GetNumVirtuals()];
memset(bmtInterface->pppInterfaceDeclaringMD[j], 0, sizeof(MethodDesc *) * pInterface->GetNumVirtuals());
}
}
// Create a fully expanded map of all interfaces we implement
- bmtInterface->pInterfaceMap = new (&pThread->m_MarshalAlloc) InterfaceInfo_t[bmtInterface->dwMaxExpandedInterfaces];
+ bmtInterface->pInterfaceMap = new (GetStackingAllocator()) InterfaceInfo_t[bmtInterface->dwMaxExpandedInterfaces];
// # slots of largest interface
bmtInterface->dwLargestInterfaceSize = 0;
// This is needed later - for each interface, we get the MethodDesc pointer for each
// method. We need to be able to persist at most one interface at a time, so we
// need enough memory for the largest interface.
- bmtInterface->ppInterfaceMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[bmtInterface->dwLargestInterfaceSize];
+ bmtInterface->ppInterfaceMethodDescList = new (GetStackingAllocator()) MethodDesc*[bmtInterface->dwLargestInterfaceSize];
- bmtInterface->ppInterfaceDeclMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[bmtInterface->dwLargestInterfaceSize];
+ bmtInterface->ppInterfaceDeclMethodDescList = new (GetStackingAllocator()) MethodDesc*[bmtInterface->dwLargestInterfaceSize];
}
EEClass *pParentClass = (IsInterface() || bmtParent->pParentMethodTable == NULL) ? NULL : bmtParent->pParentMethodTable->GetClass();
bmtVT->wCurrentNonVtableSlot = 0;
- bmtInterface->pppInterfaceImplementingMD = (MethodDesc ***) pThread->m_MarshalAlloc.Alloc(S_UINT32(sizeof(MethodDesc *)) * S_UINT32(bmtInterface->dwMaxExpandedInterfaces));
+ bmtInterface->pppInterfaceImplementingMD = (MethodDesc ***) GetStackingAllocator()->Alloc(S_UINT32(sizeof(MethodDesc *)) * S_UINT32(bmtInterface->dwMaxExpandedInterfaces));
memset(bmtInterface->pppInterfaceImplementingMD, 0, sizeof(MethodDesc *) * bmtInterface->dwMaxExpandedInterfaces);
- bmtInterface->pppInterfaceDeclaringMD = (MethodDesc ***) pThread->m_MarshalAlloc.Alloc(S_UINT32(sizeof(MethodDesc *)) * S_UINT32(bmtInterface->dwMaxExpandedInterfaces));
+ bmtInterface->pppInterfaceDeclaringMD = (MethodDesc ***) GetStackingAllocator()->Alloc(S_UINT32(sizeof(MethodDesc *)) * S_UINT32(bmtInterface->dwMaxExpandedInterfaces));
memset(bmtInterface->pppInterfaceDeclaringMD, 0, sizeof(MethodDesc *) * bmtInterface->dwMaxExpandedInterfaces);
return;
// Allocate some temporary storage. The number of overrides for a single method impl
// cannot be greater then the number of vtable slots.
- DWORD* slots = (DWORD*) new (&GetThread()->m_MarshalAlloc) DWORD[bmtVT->wCurrentVtableSlot];
- MethodDesc **replaced = new (&GetThread()->m_MarshalAlloc) MethodDesc*[bmtVT->wCurrentVtableSlot];
+ DWORD* slots = (DWORD*) new (GetStackingAllocator()) DWORD[bmtVT->wCurrentVtableSlot];
+ MethodDesc **replaced = new (GetStackingAllocator()) MethodDesc*[bmtVT->wCurrentVtableSlot];
while(pIndex < bmtMethodImpl->pIndex) {
if(bmtInterface->pdwOriginalStart == NULL)
{
Thread *pThread = GetThread();
- bmtInterface->pdwOriginalStart = new (&pThread->m_MarshalAlloc) DWORD[bmtInterface->dwMaxExpandedInterfaces];
+ bmtInterface->pdwOriginalStart = new (GetStackingAllocator()) DWORD[bmtInterface->dwMaxExpandedInterfaces];
memset(bmtInterface->pdwOriginalStart, 0, sizeof(DWORD)*bmtInterface->dwMaxExpandedInterfaces);
}
//
// Allocate the structures to keep track of the token pairs
//
- bmtMethodImpl->rgMethodImplTokens = new (&GetThread()->m_MarshalAlloc)
+ bmtMethodImpl->rgMethodImplTokens = new (GetStackingAllocator())
bmtMethodImplInfo::MethodImplTokenPair[bmtMethodImpl->dwNumberMethodImpls];
// Iterate through each MethodImpl declared on this class
//
// Allocate the structures to keep track of the impl matches
//
- bmtMethodImpl->pMethodDeclSubsts = new (&GetThread()->m_MarshalAlloc) Substitution[bmtMethodImpl->dwNumberMethodImpls];
- bmtMethodImpl->rgEntries = new (&GetThread()->m_MarshalAlloc) bmtMethodImplInfo::Entry[bmtMethodImpl->dwNumberMethodImpls];
+ bmtMethodImpl->pMethodDeclSubsts = new (GetStackingAllocator()) Substitution[bmtMethodImpl->dwNumberMethodImpls];
+ bmtMethodImpl->rgEntries = new (GetStackingAllocator()) bmtMethodImplInfo::Entry[bmtMethodImpl->dwNumberMethodImpls];
// These are used for verification
maxRidMD = pMDInternalImport->GetCountWithTokenKind(mdtMethodDef);
HRESULT hr = S_OK;
DWORD i;
- Thread *pThread = GetThread();
IMDInternalImport *pMDInternalImport = bmtType->pMDImport;
mdToken tok;
DWORD dwMemberAttrs;
// Allocate an array to contain the method tokens as well as information about the methods.
bmtMethod->cMethAndGaps = bmtMethod->hEnumMethod.EnumGetCount();
- bmtMethod->rgMethodTokens = new (&pThread->m_MarshalAlloc) mdToken[bmtMethod->cMethAndGaps];
- bmtMethod->rgMethodRVA = new (&pThread->m_MarshalAlloc) ULONG[bmtMethod->cMethAndGaps];
- bmtMethod->rgMethodAttrs = new (&pThread->m_MarshalAlloc) DWORD[bmtMethod->cMethAndGaps];
- bmtMethod->rgMethodImplFlags = new (&pThread->m_MarshalAlloc) DWORD[bmtMethod->cMethAndGaps];
- bmtMethod->rgMethodClassifications = new (&pThread->m_MarshalAlloc) DWORD[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodTokens = new (GetStackingAllocator()) mdToken[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodRVA = new (GetStackingAllocator()) ULONG[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodAttrs = new (GetStackingAllocator()) DWORD[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodImplFlags = new (GetStackingAllocator()) DWORD[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodClassifications = new (GetStackingAllocator()) DWORD[bmtMethod->cMethAndGaps];
- bmtMethod->rgszMethodName = new (&pThread->m_MarshalAlloc) LPCSTR[bmtMethod->cMethAndGaps];
+ bmtMethod->rgszMethodName = new (GetStackingAllocator()) LPCSTR[bmtMethod->cMethAndGaps];
- bmtMethod->rgMethodImpl = new (&pThread->m_MarshalAlloc) BYTE[bmtMethod->cMethAndGaps];
- bmtMethod->rgMethodType = new (&pThread->m_MarshalAlloc) BYTE[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodImpl = new (GetStackingAllocator()) BYTE[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodType = new (GetStackingAllocator()) BYTE[bmtMethod->cMethAndGaps];
enum { SeenCtor = 1, SeenInvoke = 2, SeenBeginInvoke = 4, SeenEndInvoke = 8};
unsigned delegateMethodsSeen = 0;
CONTRACTL_END;
DWORD i;
- Thread *pThread = GetThread();
-
// Allocate a MethodDesc* for each method (needed later when doing interfaces), and a FieldDesc* for each field
- bmtMethod->ppMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[NumDeclaredMethods()];
+ bmtMethod->ppMethodDescList = new (GetStackingAllocator()) MethodDesc*[NumDeclaredMethods()];
ZeroMemory(bmtMethod->ppMethodDescList, NumDeclaredMethods() * sizeof(MethodDesc *));
// Create a temporary function table (we don't know how large the vtable will be until the very end,
if (IsValueClass())
{
bmtVT->dwMaxVtableSize += NumDeclaredMethods();
- bmtMethod->ppUnboxMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[NumDeclaredMethods()];
+ bmtMethod->ppUnboxMethodDescList = new (GetStackingAllocator()) MethodDesc*[NumDeclaredMethods()];
ZeroMemory(bmtMethod->ppUnboxMethodDescList, NumDeclaredMethods() * sizeof(MethodDesc*));
}
}
// Allocate the temporary vtable
- bmtVT->pVtable = new (&pThread->m_MarshalAlloc)PCODE [bmtVT->dwMaxVtableSize];
+ bmtVT->pVtable = new (GetStackingAllocator())PCODE [bmtVT->dwMaxVtableSize];
ZeroMemory(bmtVT->pVtable, bmtVT->dwMaxVtableSize * sizeof(PCODE));
- bmtVT->pVtableMD = new (&pThread->m_MarshalAlloc) MethodDesc*[bmtVT->dwMaxVtableSize];
+ bmtVT->pVtableMD = new (GetStackingAllocator()) MethodDesc*[bmtVT->dwMaxVtableSize];
ZeroMemory(bmtVT->pVtableMD, bmtVT->dwMaxVtableSize * sizeof(MethodDesc*));
// Allocate the temporary non-vtable
- bmtVT->pNonVtableMD = new (&pThread->m_MarshalAlloc) MethodDesc*[NumDeclaredMethods()];
+ bmtVT->pNonVtableMD = new (GetStackingAllocator()) MethodDesc*[NumDeclaredMethods()];
ZeroMemory(bmtVT->pNonVtableMD, sizeof(MethodDesc*) * NumDeclaredMethods());
if (bmtParent->pParentMethodTable != NULL)
if (NumDeclaredMethods() > 0)
{
bmtParent->ppParentMethodDescBuf = (MethodDesc **)
- pThread->m_MarshalAlloc.Alloc(S_UINT32(2) * S_UINT32(NumDeclaredMethods()) *
+ GetStackingAllocator()->Alloc(S_UINT32(2) * S_UINT32(NumDeclaredMethods()) *
S_UINT32(sizeof(MethodDesc*)));
bmtParent->ppParentMethodDescBufPtr = bmtParent->ppParentMethodDescBuf;
{
STANDARD_VM_CONTRACT;
- Thread *pThread = GetThread();
- MethodNameHash *pHash = new (&pThread->m_MarshalAlloc) MethodNameHash();
+ MethodNameHash *pHash = new (GetStackingAllocator()) MethodNameHash();
- pHash->Init(pMT->GetNumVirtuals(), &(pThread->m_MarshalAlloc));
+ pHash->Init(pMT->GetNumVirtuals(), GetStackingAllocator());
MethodTable::MethodIterator it(pMT);
for (;it.IsValid(); it.Next())
class MethodTableBuilder
{
public:
- MethodTableBuilder(MethodTable * pMT)
+ MethodTableBuilder(MethodTable * pMT, StackingAllocator *pStackingAllocator)
{
LIMITED_METHOD_CONTRACT;
m_pHalfBakedMT = pMT;
m_pHalfBakedClass = pMT->GetClass();
+ m_pStackingAllocator = pStackingAllocator;
NullBMTData();
}
public:
// <NICE> Get rid of this.</NICE>
EEClass *m_pHalfBakedClass;
MethodTable * m_pHalfBakedMT;
+ StackingAllocator *m_pStackingAllocator;
+
+ StackingAllocator* GetStackingAllocator() { return m_pStackingAllocator; }
// GetHalfBakedClass: The EEClass you get back from this function may not have all its fields filled in yet.
// Thus you have to make sure that the relevant item which you are accessing has
// should fall back to the slow GetValue if GetValueSpeculative returns false.
// Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData);
+ BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData, DWORD hashValue);
DWORD GetHash(KeyType Key);
DWORD GetCount();
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+FORCEINLINE BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValueSpeculative(KeyType pKey, HashDatum *pData, DWORD hashValue)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
+ MODE_COOPERATIVE;
+#endif
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pItem = FindItemSpeculative(pKey, hashValue);
+
+ if (pItem != NULL)
+ {
+ *pData = pItem->Data;
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey)
{
CONTRACTL
PRECONDITION(GetParentMethodTable() == NULL || GetParentMethodTable()->LookupComInteropData() != NULL);
} CONTRACTL_END;
- ClassCompat::MethodTableBuilder builder(this);
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
+
+ ClassCompat::MethodTableBuilder builder(this, pStackingAllocator);
InteropMethodTableData *pData = builder.BuildInteropVTable(pamTracker);
_ASSERTE(pData);
{
BuildMethodTableThrowException(BFA_METHOD_TOKEN_OUT_OF_RANGE);
}
-
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(tok, &cMemberSignature, &pMemberSignature)))
+ {
+ BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
+ }
if (FAILED(pMDInternalImport->GetMethodDefProps(tok, &dwMemberAttrs)))
{
BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
}
+
+ // Signature validation
+ if (!bmtProp->fNoSanityChecks)
+ {
+ hr = validateTokenSig(tok,pMemberSignature,cMemberSignature,dwMemberAttrs,pMDInternalImport);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
+ }
+ }
if (IsMdRTSpecialName(dwMemberAttrs) || IsMdVirtual(dwMemberAttrs) || IsDelegate())
{
if (FAILED(pMDInternalImport->GetNameOfMethodDef(tok, (LPCSTR *)&strMethodName)))
DWORD numGenericMethodArgs = 0;
{
- HENUMInternalHolder hEnumTyPars(pMDInternalImport);
- hr = hEnumTyPars.EnumInitNoThrow(mdtGenericParam, tok);
+ SigParser genericArgParser(pMemberSignature, cMemberSignature);
+ ULONG ulCallConv;
+ hr = genericArgParser.GetCallingConvInfo(&ulCallConv);
if (FAILED(hr))
{
BuildMethodTableThrowException(hr, *bmtError);
}
- numGenericMethodArgs = hEnumTyPars.EnumGetCount();
-
- // We do not want to support context-bound objects with generic methods.
-
- if (numGenericMethodArgs != 0)
+ // Only read the generic parameter table if the method signature is generic
+ if (ulCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
{
- HENUMInternalHolder hEnumGenericPars(pMDInternalImport);
+ HENUMInternalHolder hEnumTyPars(pMDInternalImport);
+ hr = hEnumTyPars.EnumInitNoThrow(mdtGenericParam, tok);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ numGenericMethodArgs = hEnumTyPars.EnumGetCount();
- hEnumGenericPars.EnumInit(mdtGenericParam, tok);
+ // We do not want to support context-bound objects with generic methods.
- for (unsigned methIdx = 0; methIdx < numGenericMethodArgs; methIdx++)
+ if (numGenericMethodArgs != 0)
{
- mdGenericParam tkTyPar;
- pMDInternalImport->EnumNext(&hEnumGenericPars, &tkTyPar);
- DWORD flags;
- if (FAILED(pMDInternalImport->GetGenericParamProps(tkTyPar, NULL, &flags, NULL, NULL, NULL)))
- {
- BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
- }
-
- if (0 != (flags & ~(gpVarianceMask | gpSpecialConstraintMask)))
- {
- BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
- }
- switch (flags & gpVarianceMask)
- {
- case gpNonVariant:
- break;
+ HENUMInternalHolder hEnumGenericPars(pMDInternalImport);
- case gpCovariant: // intentional fallthru
- case gpContravariant:
- BuildMethodTableThrowException(VLDTR_E_GP_ILLEGAL_VARIANT_MVAR);
- break;
+ hEnumGenericPars.EnumInit(mdtGenericParam, tok);
- default:
+ for (unsigned methIdx = 0; methIdx < numGenericMethodArgs; methIdx++)
+ {
+ mdGenericParam tkTyPar;
+ pMDInternalImport->EnumNext(&hEnumGenericPars, &tkTyPar);
+ DWORD flags;
+ if (FAILED(pMDInternalImport->GetGenericParamProps(tkTyPar, NULL, &flags, NULL, NULL, NULL)))
+ {
BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
- }
+ }
+
+ if (0 != (flags & ~(gpVarianceMask | gpSpecialConstraintMask)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ switch (flags & gpVarianceMask)
+ {
+ case gpNonVariant:
+ break;
+
+ case gpCovariant: // intentional fallthru
+ case gpContravariant:
+ BuildMethodTableThrowException(VLDTR_E_GP_ILLEGAL_VARIANT_MVAR);
+ break;
+
+ default:
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
}
}
}
// But first - minimal flags validity checks
//
// No methods in Enums!
- if (fIsClassEnum)
- {
- BuildMethodTableThrowException(BFA_METHOD_IN_A_ENUM);
- }
- // RVA : 0
- if (dwMethodRVA != 0)
+#ifndef _DEBUG // Don't run the minimal validity checks for the system dll/r2r dlls (except in debug builds so we don't build a bad system dll)
+ if (!bmtProp->fNoSanityChecks)
+#endif
{
- if(fIsClassComImport)
+ if (fIsClassEnum)
{
- BuildMethodTableThrowException(BFA_METHOD_WITH_NONZERO_RVA);
+ BuildMethodTableThrowException(BFA_METHOD_IN_A_ENUM);
}
- if(IsMdAbstract(dwMemberAttrs))
+ // RVA : 0
+ if (dwMethodRVA != 0)
{
- BuildMethodTableThrowException(BFA_ABSTRACT_METHOD_WITH_RVA);
- }
- if(IsMiRuntime(dwImplFlags))
- {
- BuildMethodTableThrowException(BFA_RUNTIME_METHOD_WITH_RVA);
- }
- if(IsMiInternalCall(dwImplFlags))
- {
- BuildMethodTableThrowException(BFA_INTERNAL_METHOD_WITH_RVA);
+ if(fIsClassComImport)
+ {
+ BuildMethodTableThrowException(BFA_METHOD_WITH_NONZERO_RVA);
+ }
+ if(IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_ABSTRACT_METHOD_WITH_RVA);
+ }
+ if(IsMiRuntime(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_RUNTIME_METHOD_WITH_RVA);
+ }
+ if(IsMiInternalCall(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_INTERNAL_METHOD_WITH_RVA);
+ }
}
- }
- // Abstract / not abstract
- if(IsMdAbstract(dwMemberAttrs))
- {
- if(fIsClassNotAbstract)
- {
- BuildMethodTableThrowException(BFA_AB_METHOD_IN_AB_CLASS);
- }
- if(!IsMdVirtual(dwMemberAttrs))
+ // Abstract / not abstract
+ if(IsMdAbstract(dwMemberAttrs))
{
- BuildMethodTableThrowException(BFA_NONVIRT_AB_METHOD);
+ if(fIsClassNotAbstract)
+ {
+ BuildMethodTableThrowException(BFA_AB_METHOD_IN_AB_CLASS);
+ }
+ if(!IsMdVirtual(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONVIRT_AB_METHOD);
+ }
}
- }
- else if(fIsClassInterface)
- {
- if (IsMdRTSpecialName(dwMemberAttrs))
+ else if(fIsClassInterface)
{
- CONSISTENCY_CHECK(CheckPointer(strMethodName));
- if (strcmp(strMethodName, COR_CCTOR_METHOD_NAME))
+ if (IsMdRTSpecialName(dwMemberAttrs))
{
- BuildMethodTableThrowException(BFA_NONAB_NONCCTOR_METHOD_ON_INT);
+ CONSISTENCY_CHECK(CheckPointer(strMethodName));
+ if (strcmp(strMethodName, COR_CCTOR_METHOD_NAME))
+ {
+ BuildMethodTableThrowException(BFA_NONAB_NONCCTOR_METHOD_ON_INT);
+ }
}
}
- }
- // Virtual / not virtual
- if(IsMdVirtual(dwMemberAttrs))
- {
- if(IsMdPinvokeImpl(dwMemberAttrs))
- {
- BuildMethodTableThrowException(BFA_VIRTUAL_PINVOKE_METHOD);
- }
- if(IsMdStatic(dwMemberAttrs))
+ // Virtual / not virtual
+ if(IsMdVirtual(dwMemberAttrs))
{
- BuildMethodTableThrowException(BFA_VIRTUAL_STATIC_METHOD);
- }
- if(strMethodName && (0==strcmp(strMethodName, COR_CTOR_METHOD_NAME)))
- {
- BuildMethodTableThrowException(BFA_VIRTUAL_INSTANCE_CTOR);
+ if(IsMdPinvokeImpl(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_PINVOKE_METHOD);
+ }
+ if(IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_STATIC_METHOD);
+ }
+ if(strMethodName && (0==strcmp(strMethodName, COR_CTOR_METHOD_NAME)))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_INSTANCE_CTOR);
+ }
}
- }
- // Some interface checks.
- // We only need them if default interface method support is disabled or if this is fragile crossgen
+ // Some interface checks.
+ // We only need them if default interface method support is disabled or if this is fragile crossgen
#if !defined(FEATURE_DEFAULT_INTERFACES) || defined(FEATURE_NATIVE_IMAGE_GENERATION)
- if (fIsClassInterface
+ if (fIsClassInterface
#if defined(FEATURE_DEFAULT_INTERFACES)
- // Only fragile crossgen wasn't upgraded to deal with default interface methods.
- && !IsReadyToRunCompilation()
+ // Only fragile crossgen wasn't upgraded to deal with default interface methods.
+ && !IsReadyToRunCompilation()
#endif
- )
- {
- if (IsMdVirtual(dwMemberAttrs))
+ )
{
- if (!IsMdAbstract(dwMemberAttrs))
+ if (IsMdVirtual(dwMemberAttrs))
{
- BuildMethodTableThrowException(BFA_VIRTUAL_NONAB_INT_METHOD);
+ if (!IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_NONAB_INT_METHOD);
+ }
}
- }
- else
- {
- // Instance method
- if (!IsMdStatic(dwMemberAttrs))
+ else
{
- BuildMethodTableThrowException(BFA_NONVIRT_INST_INT_METHOD);
+ // Instance method
+ if (!IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONVIRT_INST_INT_METHOD);
+ }
}
}
- }
#endif // !defined(FEATURE_DEFAULT_INTERFACES) || defined(FEATURE_NATIVE_IMAGE_GENERATION)
- // No synchronized methods in ValueTypes
- if(fIsClassValueType && IsMiSynchronized(dwImplFlags))
- {
- BuildMethodTableThrowException(BFA_SYNC_METHOD_IN_VT);
- }
-
- // Global methods:
- if(IsGlobalClass())
- {
- if(!IsMdStatic(dwMemberAttrs))
+ // No synchronized methods in ValueTypes
+ if(fIsClassValueType && IsMiSynchronized(dwImplFlags))
{
- BuildMethodTableThrowException(BFA_NONSTATIC_GLOBAL_METHOD);
+ BuildMethodTableThrowException(BFA_SYNC_METHOD_IN_VT);
}
- if (strMethodName) //<TODO>@todo: investigate mc++ generating null name</TODO>
+
+ // Global methods:
+ if(IsGlobalClass())
{
- if(0==strcmp(strMethodName, COR_CTOR_METHOD_NAME))
+ if(!IsMdStatic(dwMemberAttrs))
{
- BuildMethodTableThrowException(BFA_GLOBAL_INST_CTOR);
+ BuildMethodTableThrowException(BFA_NONSTATIC_GLOBAL_METHOD);
+ }
+ if (strMethodName) //<TODO>@todo: investigate mc++ generating null name</TODO>
+ {
+ if(0==strcmp(strMethodName, COR_CTOR_METHOD_NAME))
+ {
+ BuildMethodTableThrowException(BFA_GLOBAL_INST_CTOR);
+ }
}
}
- }
- //@GENERICS:
- // Generic methods or methods in generic classes
- // may not be part of a COM Import class (except for WinRT), PInvoke, internal call outside mscorlib.
- if ((bmtGenerics->GetNumGenericArgs() != 0 || numGenericMethodArgs != 0) &&
- (
+ //@GENERICS:
+ // Generic methods or methods in generic classes
+ // may not be part of a COM Import class (except for WinRT), PInvoke, internal call outside mscorlib.
+ if ((bmtGenerics->GetNumGenericArgs() != 0 || numGenericMethodArgs != 0) &&
+ (
#ifdef FEATURE_COMINTEROP
- fIsClassComImport ||
- bmtProp->fComEventItfType ||
+ fIsClassComImport ||
+ bmtProp->fComEventItfType ||
#endif // FEATURE_COMINTEROP
- IsMdPinvokeImpl(dwMemberAttrs) ||
- (IsMiInternalCall(dwImplFlags) && !GetModule()->IsSystem())))
- {
+ IsMdPinvokeImpl(dwMemberAttrs) ||
+ (IsMiInternalCall(dwImplFlags) && !GetModule()->IsSystem())))
+ {
#ifdef FEATURE_COMINTEROP
- if (!GetHalfBakedClass()->IsProjectedFromWinRT())
+ if (!GetHalfBakedClass()->IsProjectedFromWinRT())
#endif // FEATURE_COMINTEROP
- {
- BuildMethodTableThrowException(BFA_BAD_PLACE_FOR_GENERIC_METHOD);
+ {
+ BuildMethodTableThrowException(BFA_BAD_PLACE_FOR_GENERIC_METHOD);
+ }
}
- }
-
- // Generic methods may not be marked "runtime". However note that
- // methods in generic delegate classes are, hence we don't apply this to
- // methods in generic classes in general.
- if (numGenericMethodArgs != 0 && IsMiRuntime(dwImplFlags))
- {
- BuildMethodTableThrowException(BFA_GENERIC_METHOD_RUNTIME_IMPL);
- }
-
- // Signature validation
- if (FAILED(pMDInternalImport->GetSigOfMethodDef(tok, &cMemberSignature, &pMemberSignature)))
- {
- BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
- }
- hr = validateTokenSig(tok,pMemberSignature,cMemberSignature,dwMemberAttrs,pMDInternalImport);
- if (FAILED(hr))
- {
- BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
- }
+ // Generic methods may not be marked "runtime". However note that
+ // methods in generic delegate classes are, hence we don't apply this to
+ // methods in generic classes in general.
+ if (numGenericMethodArgs != 0 && IsMiRuntime(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_GENERIC_METHOD_RUNTIME_IMPL);
+ }
- // Check the appearance of covariant and contravariant in the method signature
- // Note that variance is only supported for interfaces
- if (bmtGenerics->pVarianceInfo != NULL)
- {
- SigPointer sp(pMemberSignature, cMemberSignature);
- ULONG callConv;
- IfFailThrow(sp.GetCallingConvInfo(&callConv));
+ // Check the appearance of covariant and contravariant in the method signature
+ // Note that variance is only supported for interfaces
+ if (bmtGenerics->pVarianceInfo != NULL)
+ {
+ SigPointer sp(pMemberSignature, cMemberSignature);
+ ULONG callConv;
+ IfFailThrow(sp.GetCallingConvInfo(&callConv));
- if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
- IfFailThrow(sp.GetData(NULL));
+ if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ IfFailThrow(sp.GetData(NULL));
- DWORD numArgs;
- IfFailThrow(sp.GetData(&numArgs));
+ DWORD numArgs;
+ IfFailThrow(sp.GetData(&numArgs));
- // Return type behaves covariantly
- if (!EEClass::CheckVarianceInSig(
- bmtGenerics->GetNumGenericArgs(),
- bmtGenerics->pVarianceInfo,
- GetModule(),
- sp,
- gpCovariant))
- {
- BuildMethodTableThrowException(IDS_CLASSLOAD_VARIANCE_IN_METHOD_RESULT, tok);
- }
- IfFailThrow(sp.SkipExactlyOne());
- for (DWORD j = 0; j < numArgs; j++)
- {
- // Argument types behave contravariantly
- if (!EEClass::CheckVarianceInSig(bmtGenerics->GetNumGenericArgs(),
- bmtGenerics->pVarianceInfo,
- GetModule(),
- sp,
- gpContravariant))
+ // Return type behaves covariantly
+ if (!EEClass::CheckVarianceInSig(
+ bmtGenerics->GetNumGenericArgs(),
+ bmtGenerics->pVarianceInfo,
+ GetModule(),
+ sp,
+ gpCovariant))
{
- BuildMethodTableThrowException(IDS_CLASSLOAD_VARIANCE_IN_METHOD_ARG, tok);
+ BuildMethodTableThrowException(IDS_CLASSLOAD_VARIANCE_IN_METHOD_RESULT, tok);
}
IfFailThrow(sp.SkipExactlyOne());
+ for (DWORD j = 0; j < numArgs; j++)
+ {
+ // Argument types behave contravariantly
+ if (!EEClass::CheckVarianceInSig(bmtGenerics->GetNumGenericArgs(),
+ bmtGenerics->pVarianceInfo,
+ GetModule(),
+ sp,
+ gpContravariant))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_VARIANCE_IN_METHOD_ARG, tok);
+ }
+ IfFailThrow(sp.SkipExactlyOne());
+ }
}
}
// it might contain overrides for other interface methods.
DWORD dwMaxSlotSize = IsInterface() ? bmtMethod->dwNumberMethodImpls : bmtVT->cVirtualSlots;
- DWORD * slots = new (&GetThread()->m_MarshalAlloc) DWORD[dwMaxSlotSize];
- mdToken * tokens = new (&GetThread()->m_MarshalAlloc) mdToken[dwMaxSlotSize];
- RelativePointer<MethodDesc *> * replaced = new (&GetThread()->m_MarshalAlloc) RelativePointer<MethodDesc*>[dwMaxSlotSize];
+ DWORD * slots = new (GetStackingAllocator()) DWORD[dwMaxSlotSize];
+ mdToken * tokens = new (GetStackingAllocator()) mdToken[dwMaxSlotSize];
+ RelativePointer<MethodDesc *> * replaced = new (GetStackingAllocator()) RelativePointer<MethodDesc*>[dwMaxSlotSize];
DWORD iEntry = 0;
bmtMDMethod * pCurImplMethod = bmtMethodImpl->GetImplementationMethod(iEntry);
// First we do a GetCheckpoint for the thread-based allocator. ExpandExactInheritedInterfaces allocates substitution chains
// on the thread allocator rather than on the stack.
- Thread * pThread = GetThread();
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
// ***********************************************************
// ****** This must be consistent with code:ExpandApproxInterface etc. *******
// ***********************************************************
bmtExactInterfaceInfo bmtExactInterface;
- bmtExactInterface.pInterfaceSubstitution = new (&pThread->m_MarshalAlloc) Substitution[pMT->GetNumInterfaces()];
+ bmtExactInterface.pInterfaceSubstitution = new (pStackingAllocator) Substitution[pMT->GetNumInterfaces()];
bmtExactInterface.pExactMTs = pExactMTs;
bmtExactInterface.nAssigned = 0;
bmtExactInterface.typeContext = typeContext;
// Do the interfaces inherited from a parent class
if ((pParentMT != NULL) && (pParentMT->GetNumInterfaces() > 0))
{
- Substitution * pParentSubstForTypeLoad = new (&pThread->m_MarshalAlloc) Substitution(
+ Substitution * pParentSubstForTypeLoad = new (pStackingAllocator) Substitution(
pMT->GetSubstitutionForParent(NULL));
- Substitution * pParentSubstForComparing = new (&pThread->m_MarshalAlloc) Substitution(
+ Substitution * pParentSubstForComparing = new (pStackingAllocator) Substitution(
pMT->GetSubstitutionForParent(NULL));
ExpandExactInheritedInterfaces(
&bmtExactInterface,
pParentMT,
pParentSubstForTypeLoad,
- pParentSubstForComparing);
+ pParentSubstForComparing,
+ pStackingAllocator);
}
#ifdef _DEBUG
//#ExactInterfaceMap_SupersetOfParent
// If there are any __Canon instances in the type argument list, then we defer the
// ambiguity checking until an exact instantiation.
- if (!pMT->IsSharedByGenericInstantiations())
+ // As the C# compiler won't allow an ambiguous generic interface to be generated, we don't
+ // need this logic for CoreLib. We can't use the sanity-checks flag here, as these ambiguities
+ // are specific to the exact instantiation in use, not just to whether or not the type is
+ // well-formed in metadata.
+ if (!pMT->IsSharedByGenericInstantiations() && !pMT->GetModule()->IsSystem())
{
// There are no __Canon types in the instantiation, so do ambiguity check.
bmtInterfaceAmbiguityCheckInfo bmtCheckInfo;
bmtCheckInfo.pMT = pMT;
- bmtCheckInfo.ppInterfaceSubstitutionChains = new (&pThread->m_MarshalAlloc) Substitution *[pMT->GetNumInterfaces()];
- bmtCheckInfo.ppExactDeclaredInterfaces = new (&pThread->m_MarshalAlloc) MethodTable *[pMT->GetNumInterfaces()];
+ bmtCheckInfo.ppInterfaceSubstitutionChains = new (pStackingAllocator) Substitution *[pMT->GetNumInterfaces()];
+ bmtCheckInfo.ppExactDeclaredInterfaces = new (pStackingAllocator) MethodTable *[pMT->GetNumInterfaces()];
bmtCheckInfo.nAssigned = 0;
bmtCheckInfo.typeContext = typeContext;
- MethodTableBuilder::InterfacesAmbiguityCheck(&bmtCheckInfo, pMT->GetModule(), pMT->GetCl(), NULL);
+ MethodTableBuilder::InterfacesAmbiguityCheck(&bmtCheckInfo, pMT->GetModule(), pMT->GetCl(), NULL, pStackingAllocator);
}
// OK, there is no ambiguity amongst the instantiated interfaces declared on this class.
pMT->GetModule(),
pMT->GetCl(),
NULL,
- NULL
+ NULL,
+ pStackingAllocator
COMMA_INDEBUG(pMT));
CONSISTENCY_CHECK(bmtExactInterface.nAssigned == pMT->GetNumInterfaces());
bmtExactInterfaceInfo * bmtInfo,
MethodTable * pMT,
const Substitution * pSubstForTypeLoad,
- Substitution * pSubstForComparing)
+ Substitution * pSubstForComparing,
+ StackingAllocator * pStackingAllocator)
{
STANDARD_VM_CONTRACT;
if (pParentMT)
{
// Chain parent's substitution for exact type load
- Substitution * pParentSubstForTypeLoad = new (&GetThread()->m_MarshalAlloc) Substitution(
+ Substitution * pParentSubstForTypeLoad = new (pStackingAllocator) Substitution(
pMT->GetSubstitutionForParent(pSubstForTypeLoad));
// Chain parent's substitution for comparing interfaces (note that this type is temporarily
// considered as open type)
- Substitution * pParentSubstForComparing = new (&GetThread()->m_MarshalAlloc) Substitution(
+ Substitution * pParentSubstForComparing = new (pStackingAllocator) Substitution(
pMT->GetSubstitutionForParent(pSubstForComparing));
ExpandExactInheritedInterfaces(
bmtInfo,
pParentMT,
pParentSubstForTypeLoad,
- pParentSubstForComparing);
+ pParentSubstForComparing,
+ pStackingAllocator);
}
ExpandExactDeclaredInterfaces(
bmtInfo,
pMT->GetModule(),
pMT->GetCl(),
pSubstForTypeLoad,
- pSubstForComparing
+ pSubstForComparing,
+ pStackingAllocator
COMMA_INDEBUG(pMT));
// Restore type's subsitution chain for comparing interfaces
Module * pModule,
mdToken typeDef,
const Substitution * pSubstForTypeLoad,
- Substitution * pSubstForComparing
+ Substitution * pSubstForComparing,
+ StackingAllocator * pStackingAllocator
COMMA_INDEBUG(MethodTable * dbg_pClassMT))
{
STANDARD_VM_CONTRACT;
bmtInfo,
pInterface,
&ifaceSubstForTypeLoad,
- &ifaceSubstForComparing
+ &ifaceSubstForComparing,
+ pStackingAllocator
COMMA_INDEBUG(dbg_pClassMT));
}
if (FAILED(hr))
bmtExactInterfaceInfo * bmtInfo,
MethodTable * pIntf,
const Substitution * pSubstForTypeLoad_OnStack, // Allocated on stack!
- const Substitution * pSubstForComparing_OnStack // Allocated on stack!
+ const Substitution * pSubstForComparing_OnStack, // Allocated on stack!
+ StackingAllocator * pStackingAllocator
COMMA_INDEBUG(MethodTable * dbg_pClassMT))
{
STANDARD_VM_CONTRACT;
bmtInfo->pInterfaceSubstitution[n] = *pSubstForComparing_OnStack;
bmtInfo->nAssigned++;
- Substitution * pSubstForTypeLoad = new (&GetThread()->m_MarshalAlloc) Substitution(*pSubstForTypeLoad_OnStack);
+ Substitution * pSubstForTypeLoad = new (pStackingAllocator) Substitution(*pSubstForTypeLoad_OnStack);
ExpandExactDeclaredInterfaces(
bmtInfo,
pIntf->GetModule(),
pIntf->GetCl(),
pSubstForTypeLoad,
- &bmtInfo->pInterfaceSubstitution[n]
+ &bmtInfo->pInterfaceSubstitution[n],
+ pStackingAllocator
COMMA_INDEBUG(dbg_pClassMT));
} // MethodTableBuilder::ExpandExactInterface
void MethodTableBuilder::InterfacesAmbiguityCheck(bmtInterfaceAmbiguityCheckInfo *bmtCheckInfo,
Module *pModule,
mdToken typeDef,
- const Substitution *pSubstChain)
+ const Substitution *pSubstChain,
+ StackingAllocator *pStackingAllocator)
{
STANDARD_VM_CONTRACT;
CLASS_LOAD_EXACTPARENTS,
TRUE,
pSubstChain).GetMethodTable();
- InterfaceAmbiguityCheck(bmtCheckInfo, ie.CurrentSubst(), pInterface);
+ InterfaceAmbiguityCheck(bmtCheckInfo, ie.CurrentSubst(), pInterface, pStackingAllocator);
}
if (FAILED(hr))
{
//*******************************************************************************
void MethodTableBuilder::InterfaceAmbiguityCheck(bmtInterfaceAmbiguityCheckInfo *bmtCheckInfo,
const Substitution *pItfSubstChain,
- MethodTable *pIntf)
+ MethodTable *pIntf,
+ StackingAllocator *pStackingAllocator)
{
STANDARD_VM_CONTRACT;
DWORD n = bmtCheckInfo->nAssigned;
bmtCheckInfo->ppExactDeclaredInterfaces[n] = pIntf;
- bmtCheckInfo->ppInterfaceSubstitutionChains[n] = new (&GetThread()->m_MarshalAlloc) Substitution[pItfSubstChain->GetLength()];
+ bmtCheckInfo->ppInterfaceSubstitutionChains[n] = new (pStackingAllocator) Substitution[pItfSubstChain->GetLength()];
pItfSubstChain->CopyToArray(bmtCheckInfo->ppInterfaceSubstitutionChains[n]);
bmtCheckInfo->nAssigned++;
- InterfacesAmbiguityCheck(bmtCheckInfo,pIntf->GetModule(),pIntf->GetCl(),pItfSubstChain);
+ InterfacesAmbiguityCheck(bmtCheckInfo,pIntf->GetModule(),pIntf->GetCl(),pItfSubstChain, pStackingAllocator);
}
Module * pModule,
mdTypeDef cl,
Instantiation inst,
- bmtGenericsInfo * bmtGenericsInfo)
+ bmtGenericsInfo * bmtGenericsInfo,
+ StackingAllocator*pStackingAllocator)
{
CONTRACTL
{
bmtGenericsInfo->numDicts = 1;
mdGenericParam tkTyPar;
- bmtGenericsInfo->pVarianceInfo = new (&GetThread()->m_MarshalAlloc) BYTE[numGenericArgs];
+ bmtGenericsInfo->pVarianceInfo = new (pStackingAllocator) BYTE[numGenericArgs];
// If it has generic arguments but none have been specified, then load the instantiation at the formals
if (inst.IsEmpty())
{
bmtGenericsInfo->fTypicalInstantiation = TRUE;
S_UINT32 scbAllocSize = S_UINT32(numGenericArgs) * S_UINT32(sizeof(TypeHandle));
- TypeHandle * genericArgs = (TypeHandle *) GetThread()->m_MarshalAlloc.Alloc(scbAllocSize);
+ TypeHandle * genericArgs = (TypeHandle *) pStackingAllocator->Alloc(scbAllocSize);
inst = Instantiation(genericArgs, numGenericArgs);
MethodTable * pMT = NULL;
- Thread * pThread = GetThread();
-
MethodTable * pParentMethodTable = NULL;
SigPointer parentInst;
mdTypeDef tdEnclosing = mdTypeDefNil;
// used during class loading.
// <NICE> Ideally a debug/checked build should pass around tokens indicating the Checkpoint
// being used and check these dynamically </NICE>
- CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+ ACQUIRE_STACKING_ALLOCATOR(pStackingAllocator);
// Gather up generics info
- MethodTableBuilder::GatherGenericsInfo(pModule, cl, inst, &genericsInfo);
+ MethodTableBuilder::GatherGenericsInfo(pModule, cl, inst, &genericsInfo, pStackingAllocator);
Module * pLoaderModule = pModule;
if (!inst.IsEmpty())
DWORD i;
// Allocate the BuildingInterfaceList table
- pInterfaceBuildInfo = new (&GetThread()->m_MarshalAlloc) BuildingInterfaceInfo_t[cInterfaces];
+ pInterfaceBuildInfo = new (pStackingAllocator) BuildingInterfaceInfo_t[cInterfaces];
mdInterfaceImpl ii;
for (i = 0; pInternalImport->EnumNext(&hEnumInterfaceImpl, &ii); i++)
pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
}
- pLayoutRawFieldInfos = (LayoutRawFieldInfo *)GetThread()->m_MarshalAlloc.Alloc(
+ pLayoutRawFieldInfos = (LayoutRawFieldInfo *)pStackingAllocator->Alloc(
(S_UINT32(1) + S_UINT32(cFields)) * S_UINT32(sizeof(LayoutRawFieldInfo)));
{
MethodTableBuilder builder(
NULL,
pClass,
- &GetThread()->m_MarshalAlloc,
+ pStackingAllocator,
pamTracker);
pMT = builder.BuildMethodTableThrowing(
static void GatherGenericsInfo(Module *pModule,
mdTypeDef cl,
Instantiation inst,
- bmtGenericsInfo *bmtGenericsInfo);
+ bmtGenericsInfo *bmtGenericsInfo,
+ StackingAllocator *pStackingAllocator);
MethodTable *
BuildMethodTableThrowing(
bmtExactInterfaceInfo * bmtInfo,
MethodTable * pIntf,
const Substitution * pSubstForTypeLoad_OnStack, // Allocated on stack!
- const Substitution * pSubstForComparing_OnStack // Allocated on stack!
+ const Substitution * pSubstForComparing_OnStack, // Allocated on stack!
+ StackingAllocator * pStackingAllocator
COMMA_INDEBUG(MethodTable * dbg_pClassMT));
public:
Module * pModule,
mdToken typeDef,
const Substitution * pSubstForTypeLoad,
- Substitution * pSubstForComparing
+ Substitution * pSubstForComparing,
+ StackingAllocator * pStackingAllocator
COMMA_INDEBUG(MethodTable * dbg_pClassMT));
static void
bmtExactInterfaceInfo * bmtInfo,
MethodTable * pParentMT,
const Substitution * pSubstForTypeLoad,
- Substitution * pSubstForComparing);
+ Substitution * pSubstForComparing,
+ StackingAllocator * pStackingAllocator);
public:
// --------------------------------------------------------------------------------------------
bmtInterfaceAmbiguityCheckInfo *,
Module *pModule,
mdToken typeDef,
- const Substitution *pSubstChain);
+ const Substitution *pSubstChain,
+ StackingAllocator *pStackingAllocator);
private:
static void
InterfaceAmbiguityCheck(
bmtInterfaceAmbiguityCheckInfo *,
const Substitution *pSubstChain,
- MethodTable *pIntfMT);
+ MethodTable *pIntfMT,
+ StackingAllocator *pStackingAllocator);
public:
static void
m_FirstBlock = NULL;
m_FirstFree = NULL;
- m_InitialBlock = NULL;
m_DeferredFreeBlock = NULL;
#ifdef _DEBUG
m_MaxAlloc = 0;
#endif
- Init(true);
+ Init();
}
+// Constructs the inline first block of the allocator: a header whose usable
+// length is the embedded m_dataSpace buffer. This block lives inside the
+// StackingAllocator object itself (see m_InitialBlock) and is never freed —
+// Clear() is always told to stop at it.
+StackingAllocator::InitialStackBlock::InitialStackBlock()
+{
+ // No successor yet; Init() links this header in as m_FirstBlock.
+ m_initialBlockHeader.m_Next = NULL;
+ m_initialBlockHeader.m_Length = sizeof(m_dataSpace);
+ // Debug-only overrun-sentinel chain starts empty (consumed by Validate()).
+ INDEBUG(m_initialBlockHeader.m_Sentinal = 0);
+}
StackingAllocator::~StackingAllocator()
{
}
CONTRACTL_END;
- Clear(NULL);
+ Clear(&m_InitialBlock.m_initialBlockHeader);
if (m_DeferredFreeBlock)
{
#endif
}
+// Resets the allocator to its pristine state: all allocation resumes from
+// the start of the embedded initial block. Called from the constructor and
+// when Collapse() rewinds to the initial checkpoint.
+void StackingAllocator::Init()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_FirstBlock = &m_InitialBlock.m_initialBlockHeader;
+ m_FirstFree = m_FirstBlock->GetData();
+ // GetData() returns the bytes immediately after the header, which for the
+ // initial block must be exactly the embedded m_dataSpace buffer.
+ _ASSERTE((void*)m_FirstFree == (void*)m_InitialBlock.m_dataSpace);
+ m_BytesLeft = static_cast<unsigned>(m_FirstBlock->m_Length);
+}
+
// Lightweight initial checkpoint
Checkpoint StackingAllocator::s_initialCheckpoint;
+// Snapshots the current allocator position (head block + bytes left in it)
+// into *c so a later Collapse(c) can rewind all allocations made after this
+// point. Does not allocate.
+void StackingAllocator::StoreCheckpoint(Checkpoint *c)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ // Bookkeeping for debug-only nesting/usage statistics.
+ m_CheckpointDepth++;
+ m_Checkpoints++;
+#endif
+
+ // Record previous allocator state in it.
+ c->m_OldBlock = m_FirstBlock;
+ c->m_OldBytesLeft = m_BytesLeft;
+}
+
void *StackingAllocator::GetCheckpoint()
{
CONTRACTL {
// a special marker, s_initialCheckpoint). This is because we know how to restore the
// allocator state on a Collapse without having to store any additional
// context info.
- if ((m_InitialBlock == NULL) || (m_FirstFree == m_InitialBlock->m_Data))
+ if (m_FirstFree == m_InitialBlock.m_dataSpace)
return &s_initialCheckpoint;
// Remember the current allocator state.
// limit of MinBlockSize and an upper limit of MaxBlockSize. If the
// request is larger than MaxBlockSize then allocate exactly that
// amount.
- // Additionally, if we don't have an initial block yet, use an increased
- // lower bound for the size, since we intend to cache this block.
- unsigned lower = m_InitialBlock ? MinBlockSize : InitBlockSize;
+ unsigned lower = MinBlockSize;
size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), MaxBlockSize));
// Allocate the block.
#endif
}
- // If this is the first block allocated, we record that fact since we
- // intend to cache it.
- if (m_InitialBlock == NULL)
- {
- _ASSERTE((m_FirstBlock == NULL) && (m_FirstFree == NULL) && (m_BytesLeft == 0));
- m_InitialBlock = b;
- }
-
// Link new block to head of block chain and update internal state to
// start allocating from this new block.
b->m_Next = m_FirstBlock;
m_FirstBlock = b;
- m_FirstFree = b->m_Data;
+ m_FirstFree = b->GetData();
// the cast below is safe because b->m_Length is less than MaxBlockSize (4096)
m_BytesLeft = static_cast<unsigned>(b->m_Length);
// Special case collapsing back to the initial checkpoint.
if (c == &s_initialCheckpoint || c->m_OldBlock == NULL) {
- Clear(m_InitialBlock);
- Init(false);
+ Clear(&m_InitialBlock.m_initialBlockHeader);
+ Init();
// confirm no buffer overruns
INDEBUG(Validate(m_FirstBlock, m_FirstFree));
// Restore former allocator state.
m_FirstBlock = pOldBlock;
- m_FirstFree = &pOldBlock->m_Data[pOldBlock->m_Length - iOldBytesLeft];
+ m_FirstFree = &pOldBlock->GetData()[pOldBlock->m_Length - iOldBytesLeft];
m_BytesLeft = iOldBytesLeft;
// confirm no buffer overruns
INDEBUG(Validate(m_FirstBlock, m_FirstFree));
}
+#ifdef _DEBUG
+// Debug-only buffer-overrun check: walks the block's sentinel chain from its
+// head down to 'spot' and asserts each sentinel's marker is intact, then
+// trims the chain to the sentinels still at or above 'spot'. Also asserts
+// the embedded initial block's header length invariant.
+void StackingAllocator::Validate(StackBlock *block, void* spot)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!block)
+ return;
+ _ASSERTE(m_InitialBlock.m_initialBlockHeader.m_Length == sizeof(m_InitialBlock.m_dataSpace));
+ Sentinal* ptr = block->m_Sentinal;
+ _ASSERTE(spot);
+ while(ptr >= spot)
+ {
+ // If this assert goes off then someone overwrote their buffer!
+ // A common candidate is PINVOKE buffer run. To confirm look
+ // up on the stack for NDirect.* Look for the MethodDesc
+ // associated with it. Be very suspicious if it is one that
+ // has a return string buffer!. This usually means the end
+ // programmer did not allocate a big enough buffer before passing
+ // it to the PINVOKE method.
+ if (ptr->m_Marker1 != Sentinal::marker1Val)
+ _ASSERTE(!"Memory overrun!! May be bad buffer passed to PINVOKE. turn on logging LF_STUBS level 6 to find method");
+ ptr = ptr->m_Next;
+ }
+ // Drop sentinels below 'spot'; they belong to space being reclaimed.
+ block->m_Sentinal = ptr;
+}
+#endif // _DEBUG
+
+// Frees every block from the head of the chain down to — but not including —
+// ToBlock. ToBlock must be non-NULL (current callers pass the never-freed
+// embedded initial block). The most recently walked block is parked in
+// m_DeferredFreeBlock instead of being released, so repeated grow/shrink
+// across one block boundary doesn't hit the OS allocator every time.
+void StackingAllocator::Clear(StackBlock *ToBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ StackBlock *p = m_FirstBlock;
+ StackBlock *q;
+ _ASSERTE(ToBlock != NULL);
+
+ while (p != ToBlock)
+ {
+ PREFAST_ASSUME(p != NULL);
+
+ q = p;
+ p = p->m_Next;
+
+ // Debug: verify the block's sentinels before releasing it.
+ INDEBUG(Validate(q, q));
+
+ // we don't give the tail block back to the OS
+ // because we can get into situations where we're growing
+ // back and forth over a single seam for a tiny alloc
+ // and the perf is a disaster -- VSWhidbey #100462
+ if (m_DeferredFreeBlock != NULL)
+ {
+ // Blocks are allocated as raw char[] storage, so free them the same way.
+ delete [] (char *)m_DeferredFreeBlock;
+ }
+
+ m_DeferredFreeBlock = q;
+ m_DeferredFreeBlock->m_Next = NULL;
+ }
+}
void * __cdecl operator new(size_t n, StackingAllocator * alloc)
{
STATIC_CONTRACT_THROWS;
return alloc->UnsafeAllocNoThrow((unsigned)n);
}
+// Rewinds the allocator to the checkpoint taken at construction. If this
+// holder created the allocator (m_owner), it also unpublishes it from the
+// thread and runs its destructor; the storage itself was either _alloca'd
+// (no free needed) or heap-allocated and released by the NewHolder in
+// ACQUIRE_STACKING_ALLOCATOR.
+StackingAllocatorHolder::~StackingAllocatorHolder()
+{
+ m_pStackingAllocator->Collapse(m_checkpointMarker);
+ if (m_owner)
+ {
+ m_thread->m_stackLocalAllocator = NULL;
+ m_pStackingAllocator->~StackingAllocator();
+ }
+}
+
+// Takes a checkpoint in pStackingAllocator immediately (see the initializer
+// list ordering: m_checkpointMarker is captured before the body runs). When
+// 'owner' is true this holder created the allocator, so it is published on
+// the thread for reuse by nested ACQUIRE_STACKING_ALLOCATOR scopes.
+StackingAllocatorHolder::StackingAllocatorHolder(StackingAllocator *pStackingAllocator, Thread *pThread, bool owner) :
+ m_pStackingAllocator(pStackingAllocator),
+ m_checkpointMarker(pStackingAllocator->GetCheckpoint()),
+ m_thread(pThread),
+ m_owner(owner)
+{
+ if (m_owner)
+ {
+ m_thread->m_stackLocalAllocator = pStackingAllocator;
+ }
+}
DWORD_PTR m_Length; // Length of block excluding header (needs to be pointer-sized for alignment on IA64)
INDEBUG(Sentinal* m_Sentinal;) // insure that we don't fall of the end of the buffer
INDEBUG(void** m_Pad;) // keep the size a multiple of 8
- char m_Data[]; // Start of user allocation space
+ char *GetData() { return (char *)(this + 1);}
};
// Whenever a checkpoint is requested, a checkpoint structure is allocated
enum
{
- MinBlockSize = 128,
- MaxBlockSize = 4096,
- InitBlockSize = 512
+ MinBlockSize = 0x2000,
+ MaxBlockSize = 0x8000,
};
+private:
+ struct InitialStackBlock
+ {
+ InitialStackBlock();
+ StackBlock m_initialBlockHeader;
+ char m_dataSpace[0x2000];
+ };
+
+public:
+
#ifndef DACCESS_COMPILE
StackingAllocator();
~StackingAllocator();
StackingAllocator() { LIMITED_METHOD_CONTRACT; }
#endif
- void StoreCheckpoint(Checkpoint *c)
- {
- LIMITED_METHOD_CONTRACT;
-
-#ifdef _DEBUG
- m_CheckpointDepth++;
- m_Checkpoints++;
-#endif
-
- // Record previous allocator state in it.
- c->m_OldBlock = m_FirstBlock;
- c->m_OldBytesLeft = m_BytesLeft;
- }
-
+ void StoreCheckpoint(Checkpoint *c);
void* GetCheckpoint();
// @todo move this into a .inl file as many class users of this class don't need to include this body
void* UnsafeAllocSafeThrow(UINT32 size);
void* UnsafeAlloc(UINT32 size);
+
private:
bool AllocNewBlockForBytes(unsigned n);
- StackBlock *m_FirstBlock; // Pointer to head of allocation block list
- char *m_FirstFree; // Pointer to first free byte in head block
- unsigned m_BytesLeft; // Number of free bytes left in head block
- StackBlock *m_InitialBlock; // The first block is special, we never free it
- StackBlock *m_DeferredFreeBlock; // Avoid going to the OS too often by deferring one free
+ StackBlock *m_FirstBlock; // Pointer to head of allocation block list
+ char *m_FirstFree; // Pointer to first free byte in head block
+ unsigned m_BytesLeft; // Number of free bytes left in head block
+ InitialStackBlock m_InitialBlock; // The first block is special, we never free it
+ StackBlock *m_DeferredFreeBlock; // Avoid going to the OS too often by deferring one free
#ifdef _DEBUG
unsigned m_CheckpointDepth;
unsigned m_MaxAlloc;
#endif
- void Init(bool bResetInitBlock)
- {
- WRAPPER_NO_CONTRACT;
-
- if (bResetInitBlock || (m_InitialBlock == NULL))
- {
- Clear(NULL);
- m_FirstBlock = NULL;
- m_FirstFree = NULL;
- m_BytesLeft = 0;
- m_InitialBlock = NULL;
- }
- else
- {
- m_FirstBlock = m_InitialBlock;
- m_FirstFree = m_InitialBlock->m_Data;
- _ASSERTE(FitsIn<unsigned>(m_InitialBlock->m_Length));
- m_BytesLeft = static_cast<unsigned>(m_InitialBlock->m_Length);
- }
- }
+ void Init();
#ifdef _DEBUG
- void Validate(StackBlock *block, void* spot)
- {
- LIMITED_METHOD_CONTRACT;
-
- if (!block)
- return;
- Sentinal* ptr = block->m_Sentinal;
- _ASSERTE(spot);
- while(ptr >= spot)
- {
- // If this assert goes off then someone overwrote their buffer!
- // A common candidate is PINVOKE buffer run. To confirm look
- // up on the stack for NDirect.* Look for the MethodDesc
- // associated with it. Be very suspicious if it is one that
- // has a return string buffer!. This usually means the end
- // programmer did not allocate a big enough buffer before passing
- // it to the PINVOKE method.
- if (ptr->m_Marker1 != Sentinal::marker1Val)
- _ASSERTE(!"Memory overrun!! May be bad buffer passed to PINVOKE. turn on logging LF_STUBS level 6 to find method");
- ptr = ptr->m_Next;
- }
- block->m_Sentinal = ptr;
- }
+ void Validate(StackBlock *block, void* spot);
#endif
- void Clear(StackBlock *ToBlock)
- {
- LIMITED_METHOD_CONTRACT;
-
- StackBlock *p = m_FirstBlock;
- StackBlock *q;
-
- while (p != ToBlock)
- {
- PREFAST_ASSUME(p != NULL);
-
- q = p;
- p = p->m_Next;
-
- INDEBUG(Validate(q, q));
-
- // we don't give the tail block back to the OS
- // because we can get into situations where we're growing
- // back and forth over a single seam for a tiny alloc
- // and the perf is a disaster -- VSWhidbey #100462
- if (m_DeferredFreeBlock != NULL)
- {
- delete [] (char *)m_DeferredFreeBlock;
- }
-
- m_DeferredFreeBlock = q;
- m_DeferredFreeBlock->m_Next = NULL;
- }
- }
+ void Clear(StackBlock *ToBlock);
private :
static Checkpoint s_initialCheckpoint;
};
+// ACQUIRE_STACKING_ALLOCATOR(name) declares 'name' as a StackingAllocator*
+// valid for the rest of the enclosing scope:
+//  - reuses the current thread's published allocator if one exists;
+//  - otherwise constructs a fresh one, placement-new'd into _alloca storage
+//    when CheckCanUseStackAlloc() says there is enough stack headroom, or
+//    heap-allocated (guarded by a NewHolder) when there is not;
+//  - the trailing StackingAllocatorHolder checkpoints on entry and, at scope
+//    exit, collapses to that checkpoint and tears down/unpublishes the
+//    allocator if this scope created it.
+// NOTE: comments cannot go inside the macro body — line splicing happens
+// before comment removal, so a // comment before a '\' would eat the next line.
+#define ACQUIRE_STACKING_ALLOCATOR(stackingAllocatorName) \
+ Thread *pThread__ACQUIRE_STACKING_ALLOCATOR = GetThread(); \
+ StackingAllocator *stackingAllocatorName = pThread__ACQUIRE_STACKING_ALLOCATOR->m_stackLocalAllocator; \
+ bool allocatorOwner__ACQUIRE_STACKING_ALLOCATOR = false; \
+ NewHolder<StackingAllocator> heapAllocatedStackingBuffer__ACQUIRE_STACKING_ALLOCATOR; \
+\
+ if (stackingAllocatorName == NULL) \
+ { \
+ if (pThread__ACQUIRE_STACKING_ALLOCATOR->CheckCanUseStackAlloc()) \
+ { \
+ stackingAllocatorName = new (_alloca(sizeof(StackingAllocator))) StackingAllocator; \
+ } \
+ else \
+ {\
+ stackingAllocatorName = new (nothrow) StackingAllocator; \
+ if (stackingAllocatorName == NULL) \
+ ThrowOutOfMemory(); \
+ heapAllocatedStackingBuffer__ACQUIRE_STACKING_ALLOCATOR = stackingAllocatorName; \
+ }\
+ allocatorOwner__ACQUIRE_STACKING_ALLOCATOR = true; \
+ } \
+ StackingAllocatorHolder sah_ACQUIRE_STACKING_ALLOCATOR(stackingAllocatorName, pThread__ACQUIRE_STACKING_ALLOCATOR, allocatorOwner__ACQUIRE_STACKING_ALLOCATOR)
+
+class Thread;
+
+// RAII guard paired with ACQUIRE_STACKING_ALLOCATOR. Construction takes a
+// checkpoint in the given allocator (and, when 'owner' is true, publishes it
+// as the thread's m_stackLocalAllocator); destruction collapses back to that
+// checkpoint and, for owners, unpublishes and destroys the allocator.
+class StackingAllocatorHolder
+{
+ StackingAllocator *m_pStackingAllocator; // allocator being scoped
+ void* m_checkpointMarker; // checkpoint captured at construction
+ Thread* m_thread; // thread whose allocator slot we may own
+ bool m_owner; // true if this holder created the allocator
+
+ public:
+ ~StackingAllocatorHolder();
+ StackingAllocatorHolder(StackingAllocator *pStackingAllocator, Thread *pThread, bool owner);
+ StackingAllocator *GetStackingAllocator() { return m_pStackingAllocator; }
+ // Fix: operator-> must return a pointer (or a class that itself overloads
+ // operator->). The previous 'StackingAllocator &' return type made every
+ // use of 'holder->Member()' ill-formed, since StackingAllocator has no
+ // operator-> of its own.
+ StackingAllocator *operator->() { return m_pStackingAllocator; }
+};
+
+
void * __cdecl operator new(size_t n, StackingAllocator *alloc);
void * __cdecl operator new[](size_t n, StackingAllocator *alloc);
void * __cdecl operator new(size_t n, StackingAllocator *alloc, const NoThrow&) throw();
HashDatum Data;
DWORD dwHash = m_StringToEntryHashTable->GetHash(pStringData);
- if (m_StringToEntryHashTable->GetValue(pStringData, &Data, dwHash))
- {
- STRINGREF *pStrObj = NULL;
- pStrObj = ((StringLiteralEntry*)Data)->GetStringObject();
- _ASSERTE(!bAddIfNotFound || pStrObj);
- return pStrObj;
+ // Retrieve the string literal from the global string literal map.
+ CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
- }
- else
- {
- // Retrieve the string literal from the global string literal map.
- CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
-
- // TODO: We can be more efficient by checking our local hash table now to see if
- // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
- // (Rather than waiting until after we look the string up in the global map)
-
- StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetStringLiteral(pStringData, dwHash, bAddIfNotFound));
+ // TODO: We can be more efficient by checking our local hash table now to see if
+ // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
+ // (Rather than waiting until after we look the string up in the global map)
+
+ StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetStringLiteral(pStringData, dwHash, bAddIfNotFound));
- _ASSERTE(pEntry || !bAddIfNotFound);
+ _ASSERTE(pEntry || !bAddIfNotFound);
- // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
- if (pEntry)
- {
- // If the entry exists in the Global map and the appdomain wont ever unload then we really don't need to add a
- // hashentry in the appdomain specific map.
- // TODO: except that by not inserting into our local table we always take the global map lock
- // and come into this path, when we could succeed at a lock free lookup above.
-
- if (!bAppDomainWontUnload)
- {
- // Make sure some other thread has not already added it.
- if (!m_StringToEntryHashTable->GetValue(pStringData, &Data))
- {
- // Insert the handle to the string into the hash table.
- m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
- }
- else
- {
- pEntry.Release(); //while we're still under lock
- }
+ // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
+ if (pEntry)
+ {
+ // If the entry exists in the Global map and the appdomain wont ever unload then we really don't need to add a
+ // hashentry in the appdomain specific map.
+ // TODO: except that by not inserting into our local table we always take the global map lock
+ // and come into this path, when we could succeed at a lock free lookup above.
+
+ if (!bAppDomainWontUnload)
+ {
+ // Make sure some other thread has not already added it.
+ if (!m_StringToEntryHashTable->GetValue(pStringData, &Data))
+ {
+ // Insert the handle to the string into the hash table.
+ m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
}
-#ifdef _DEBUG
else
{
- LOG((LF_APPDOMAIN, LL_INFO10000, "Avoided adding String literal to appdomain map: size: %d bytes\n", pStringData->GetCharCount()));
+ pEntry.Release(); //while we're still under lock
}
-#endif
- pEntry.SuppressRelease();
- STRINGREF *pStrObj = NULL;
- // Retrieve the string objectref from the string literal entry.
- pStrObj = pEntry->GetStringObject();
- _ASSERTE(!bAddIfNotFound || pStrObj);
- return pStrObj;
}
+#ifdef _DEBUG
+ else
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10000, "Avoided adding String literal to appdomain map: size: %d bytes\n", pStringData->GetCharCount()));
+ }
+#endif
+ pEntry.SuppressRelease();
+ STRINGREF *pStrObj = NULL;
+ // Retrieve the string objectref from the string literal entry.
+ pStrObj = pEntry->GetStringObject();
+ _ASSERTE(!bAddIfNotFound || pStrObj);
+ return pStrObj;
}
// If the bAddIfNotFound flag is set then we better have a string
// string object at this point.
HashDatum Data;
StringLiteralEntry *pEntry = NULL;
- if (m_StringToEntryHashTable->GetValue(pStringData, &Data, dwHash))
+ if (m_StringToEntryHashTable->GetValueSpeculative(pStringData, &Data, dwHash)) // Since we hold the critical section here, we can safely use the speculative variant of GetValue
{
pEntry = (StringLiteralEntry*)Data;
// If the entry is already in the table then addref it before we return it.
m_CacheStackBase = 0;
m_CacheStackLimit = 0;
m_CacheStackSufficientExecutionLimit = 0;
+ m_CacheStackStackAllocNonRiskyExecutionLimit = 0;
m_LastAllowableStackAddress= 0;
m_ProbeLimit = 0;
{
m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
}
+
+ // Compute the limit used by CheckCanUseStackAlloc and cache it on the thread. This minimum stack size should
+ // be sufficient to avoid all significant risk of a moderate size stack alloc interfering with application behavior
+ const UINT_PTR StackAllocNonRiskyExecutionStackSize = 512 * 1024;
+ _ASSERTE(m_CacheStackBase >= m_CacheStackLimit);
+ if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) >
+ StackAllocNonRiskyExecutionStackSize)
+ {
+ m_CacheStackStackAllocNonRiskyExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + StackAllocNonRiskyExecutionStackSize;
+ }
+ else
+ {
+ m_CacheStackStackAllocNonRiskyExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
+ }
}
// Ensure that we've setup the stack guarantee properly before we cache the stack limits
nPriority = ResetManagedThreadObject(nPriority);
}
- //m_MarshalAlloc.Collapse(NULL);
-
if (fResetAbort && IsAbortRequested()) {
UnmarkThreadForAbort(TAR_ALL);
}
BOOL IsAddressInStack (PTR_VOID addr) const { return TRUE; }
static BOOL IsAddressInCurrentStack (PTR_VOID addr) { return TRUE; }
- StackingAllocator m_MarshalAlloc;
+ StackingAllocator* m_stackLocalAllocator = NULL;
+ bool CheckCanUseStackAlloc() { return true; }
private:
LoadLevelLimiter *m_pLoadLimiter;
DWORD m_dwLastError;
};
-inline void DoReleaseCheckpoint(void *checkPointMarker)
-{
- WRAPPER_NO_CONTRACT;
- GetThread()->m_MarshalAlloc.Collapse(checkPointMarker);
-}
-
-// CheckPointHolder : Back out to a checkpoint on the thread allocator.
-typedef Holder<void*, DoNothing,DoReleaseCheckpoint> CheckPointHolder;
-
class AVInRuntimeImplOkayHolder
{
public:
// is started using a CheckPointHolder and GetCheckpoint, and this region can then be used for allocations
// from that point onwards, and then all memory is reclaimed when the static scope for the
// checkpoint is exited by the running thread.
- StackingAllocator m_MarshalAlloc;
+ StackingAllocator* m_stackLocalAllocator = NULL;
// Flags used to indicate tasks the thread has to do.
ThreadTasks m_ThreadTasks;
// and GetCachedStackBase for the cached values on this Thread.
static void * GetStackLowerBound();
static void * GetStackUpperBound();
+
+ // Returns true when the current stack pointer is still above the cached
+ // "non-risky" limit (stack limit + 512KB headroom, see
+ // m_CacheStackStackAllocNonRiskyExecutionLimit), meaning a moderate
+ // _alloca — e.g. an ACQUIRE_STACKING_ALLOCATOR buffer — is safe here.
+ bool CheckCanUseStackAlloc()
+ {
+ // The address of a local approximates the current stack pointer.
+ int local;
+ UINT_PTR current = reinterpret_cast<UINT_PTR>(&local);
+ UINT_PTR limit = GetCachedStackStackAllocNonRiskyExecutionLimit();
+ return (current > limit);
+ }
+#else // DACCESS_COMPILE
+ // DAC builds never _alloca from this path, so always report OK.
+ bool CheckCanUseStackAlloc() { return true; }
#endif
enum SetStackLimitScope { fAll, fAllowableOnly };
PTR_VOID GetCachedStackBase() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackBase; }
PTR_VOID GetCachedStackLimit() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackLimit;}
UINT_PTR GetCachedStackSufficientExecutionLimit() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackSufficientExecutionLimit;}
+ UINT_PTR GetCachedStackStackAllocNonRiskyExecutionLimit() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackStackAllocNonRiskyExecutionLimit;}
private:
// Access the base and limit of the stack. (I.e. the memory ranges that the thread has reserved for its stack).
PTR_VOID m_CacheStackBase;
PTR_VOID m_CacheStackLimit;
UINT_PTR m_CacheStackSufficientExecutionLimit;
+ UINT_PTR m_CacheStackStackAllocNonRiskyExecutionLimit;
#define HARD_GUARD_REGION_SIZE GetOsPageSize()
#endif // _DEBUG
-
-
-
-inline void DoReleaseCheckpoint(void *checkPointMarker)
-{
- WRAPPER_NO_CONTRACT;
- GetThread()->m_MarshalAlloc.Collapse(checkPointMarker);
-}
-
-
-// CheckPointHolder : Back out to a checkpoint on the thread allocator.
-typedef Holder<void*, DoNothing, DoReleaseCheckpoint> CheckPointHolder;
-
-
#ifdef _DEBUG_IMPL
// Holder for incrementing the ForbidGCLoaderUse counter.
class GCForbidLoaderUseHolder