#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
ASM_OFFSET( 4, 8, InterfaceDispatchCell, m_pCache)
-#ifndef HOST_64BIT
+#ifdef INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
ASM_OFFSET( 8, 0, InterfaceDispatchCache, m_pCell)
#endif
ASM_OFFSET( 10, 20, InterfaceDispatchCache, m_rgEntries)
// any more) we can place them on one of several free lists based on their size.
//
-#if defined(HOST_AMD64) || defined(HOST_ARM64)
+#ifndef INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
// Head of the list of discarded cache blocks that can't be re-used just yet.
-InterfaceDispatchCache * g_pDiscardedCacheList; // for AMD64 and ARM64, m_pCell is not used and we can link the discarded blocks themselves
+InterfaceDispatchCache * g_pDiscardedCacheList; // m_pCell is not used and we can link the discarded blocks themselves
-#else // defined(HOST_AMD64) || defined(HOST_ARM64)
+#else // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
struct DiscardedCacheBlock
{
// Free list of DiscardedCacheBlock items
static DiscardedCacheBlock * g_pDiscardedCacheFree = NULL;
-#endif // defined(HOST_AMD64) || defined(HOST_ARM64)
+#endif // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
// Free lists for each cache size up to the maximum. We allocate from these in preference to new memory.
static InterfaceDispatchCache * g_rgFreeLists[CID_MAX_CACHE_SIZE_LOG2 + 1];
CrstHolder lh(&g_sListLock);
-#if defined(HOST_AMD64) || defined(HOST_ARM64)
+#ifndef INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
- // on AMD64 and ARM64, we can thread the list through the blocks directly
+ // we can thread the list through the blocks directly
pCache->m_pNextFree = g_pDiscardedCacheList;
g_pDiscardedCacheList = pCache;
-#else // defined(HOST_AMD64) || defined(HOST_ARM64)
+#else // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
// on other architectures, we cannot overwrite pCache->m_pNextFree yet
// because it shares storage with m_pCell which may still be used as a back
g_pDiscardedCacheList = pDiscardedCacheBlock;
}
-#endif // defined(HOST_AMD64) || defined(HOST_ARM64)
+#endif // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
}
// Called during a GC to empty the list of discarded caches (which we can now guarantee aren't being accessed)
// No need for any locks, we're not racing with any other threads any more.
// Walk the list of discarded caches.
-#if defined(HOST_AMD64) || defined(HOST_ARM64)
+#ifndef INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
- // on AMD64, this is threaded directly through the cache blocks
+ // this is threaded directly through the cache blocks
InterfaceDispatchCache * pCache = g_pDiscardedCacheList;
while (pCache)
{
pCache = pNextCache;
}
-#else // defined(HOST_AMD64) || defined(HOST_ARM64)
+#else // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
// on other architectures, we use an auxiliary list instead
DiscardedCacheBlock * pDiscardedCacheBlock = g_pDiscardedCacheList;
pDiscardedCacheBlock = pNextDiscardedCacheBlock;
}
-#endif // defined(HOST_AMD64) || defined(HOST_ARM64)
+#endif // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
// We processed all the discarded entries, so we can simply NULL the list head.
g_pDiscardedCacheList = NULL;
if (InterfaceDispatchCell::IsCache(newCacheValue))
{
pCache = (InterfaceDispatchCache*)newCacheValue;
-#if !defined(HOST_AMD64) && !defined(HOST_ARM64)
- // Set back pointer to interface dispatch cell for non-AMD64 and non-ARM64
- // for AMD64 and ARM64, we have enough registers to make this trick unnecessary
+#ifdef INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
+ // Set back pointer to interface dispatch cell for x86 and ARM, on other
+ // architectures we have enough registers to make this trick unnecessary
pCache->m_pCell = pCell;
-#endif // !defined(HOST_AMD64) && !defined(HOST_ARM64)
+#endif // INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
// Add entry to the first unused slot.
InterfaceDispatchCacheEntry * pCacheEntry = &pCache->m_rgEntries[cOldCacheEntries];
// cache miss processing needs to determine this value in a synchronized manner, so it can't be contained in
// the owning interface dispatch indirection cell) and a list entry used to link the caches in one of a couple
// of lists related to cache reclamation.
+
+#if defined(HOST_ARM) || defined(HOST_X86)
+// On ARM and x86 the slow path in the stubs needs to reload the cell pointer from the cache due to the lack
+// of available (volatile non-argument) registers.
+#define INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
+#endif // defined(HOST_ARM) || defined(HOST_X86)
+
#pragma warning(push)
#pragma warning(disable:4200) // nonstandard extension used: zero-sized array in struct/union
struct InterfaceDispatchCell;
union
{
InterfaceDispatchCache * m_pNextFree; // next in free list
-#ifndef HOST_AMD64
- InterfaceDispatchCell * m_pCell; // pointer back to interface dispatch cell - not used for AMD64
+#ifdef INTERFACE_DISPATCH_CACHE_HAS_CELL_BACKPOINTER
+ InterfaceDispatchCell * m_pCell; // pointer back to interface dispatch cell
#endif
};
uint32_t m_cEntries;
case ArrayMethodKind.AddressWithHiddenArg:
{
var parameters = new TypeDesc[_owningType.Rank + 1];
- parameters[0] = Context.GetWellKnownType(WellKnownType.IntPtr);
+ parameters[0] = Context.GetPointerType(Context.GetWellKnownType(WellKnownType.Void));
for (int i = 0; i < _owningType.Rank; i++)
parameters[i + 1] = _owningType.Context.GetWellKnownType(WellKnownType.Int32);
_signature = new MethodSignature(0, 0, _owningType.ElementType.MakeByRefType(), parameters, MethodSignature.EmbeddedSignatureMismatchPermittedFlag);
// Shared instance methods on generic valuetypes have a hidden parameter with the generic context.
// We add it to the signature so that we can refer to it from IL.
- parameters[0] = Context.GetWellKnownType(WellKnownType.IntPtr);
+ parameters[0] = Context.GetWellKnownType(WellKnownType.Void).MakePointerType();
for (int i = 0; i < _methodRepresented.Signature.Length; i++)
parameters[i + 1] = _methodRepresented.Signature[i];
}
IEETypeNode interfaceType = factory.NecessaryTypeSymbol(_targetMethod.OwningType);
- if (interfaceType.RepresentsIndirectionCell)
+ if (factory.Target.SupportsRelativePointers)
{
- objData.EmitReloc(interfaceType, RelocType.IMAGE_REL_BASED_RELPTR32,
- (int)InterfaceDispatchCellCachePointerFlags.CachePointerIsIndirectedInterfaceRelativePointer);
+ if (interfaceType.RepresentsIndirectionCell)
+ {
+ objData.EmitReloc(interfaceType, RelocType.IMAGE_REL_BASED_RELPTR32,
+ (int)InterfaceDispatchCellCachePointerFlags.CachePointerIsIndirectedInterfaceRelativePointer);
+ }
+ else
+ {
+ objData.EmitReloc(interfaceType, RelocType.IMAGE_REL_BASED_RELPTR32,
+ (int)InterfaceDispatchCellCachePointerFlags.CachePointerIsInterfaceRelativePointer);
+ }
+
+ if (objData.TargetPointerSize == 8)
+ {
+                // IMAGE_REL_BASED_RELPTR32 is a 32-bit relocation. However, the cell needs a full pointer
+ // width there since a pointer to the cache will be written into the cell. Emit additional
+ // 32 bits on targets whose pointer size is 64 bit.
+ objData.EmitInt(0);
+ }
}
else
{
- objData.EmitReloc(interfaceType, RelocType.IMAGE_REL_BASED_RELPTR32,
- (int)InterfaceDispatchCellCachePointerFlags.CachePointerIsInterfaceRelativePointer);
- }
-
- if (objData.TargetPointerSize == 8)
- {
- // IMAGE_REL_BASED_RELPTR is a 32-bit relocation. However, the cell needs a full pointer
- // width there since a pointer to the cache will be written into the cell. Emit additional
- // 32 bits on targets whose pointer size is 64 bit.
- objData.EmitInt(0);
+ // There are no free bits in the cache flags, but we could support the indirection cell case
+ // by repurposing "CachePointerIsIndirectedInterfaceRelativePointer" to mean "relative indirect
+ // if the target supports it, simple indirect otherwise".
+ Debug.Assert(!interfaceType.RepresentsIndirectionCell);
+ objData.EmitPointerReloc(interfaceType,
+ (int)InterfaceDispatchCellCachePointerFlags.CachePointerIsInterfacePointerOrMetadataToken);
}
}