JITHELPER(CORINFO_HELP_EE_PRESTUB, ThePreStub, CORINFO_HELP_SIG_NO_ALIGN_STUB)
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+#if defined(HAS_FIXUP_PRECODE)
JITHELPER(CORINFO_HELP_EE_PRECODE_FIXUP, PrecodeFixupThunk, CORINFO_HELP_SIG_NO_ALIGN_STUB)
#else
JITHELPER(CORINFO_HELP_EE_PRECODE_FIXUP, NULL, CORINFO_HELP_SIG_NO_ALIGN_STUB)
ASMCONSTANTS_C_ASSERT(SIZEOF__FaultingExceptionFrame == sizeof(FaultingExceptionFrame));
ASMCONSTANTS_C_ASSERT(FaultingExceptionFrame__m_fFilterExecuted == offsetof(FaultingExceptionFrame, m_fFilterExecuted));
+// ARM64 FixupPrecode layout constants used by the PrecodeFixupThunk assembly code.
+// The C_ASSERTs below keep them in sync with the C++ FixupPrecode definition.
+#define SIZEOF__FixupPrecode 24
+#define Offset_PrecodeChunkIndex 15
+#define Offset_MethodDescChunkIndex 14
+#define MethodDesc_ALIGNMENT_SHIFT 3
+// sizeof(FixupPrecode) == 24 is not a power of two, so index scaling in the thunk
+// is done as (index << 3) + (index << 4); hence two separate shift constants.
+#define FixupPrecode_ALIGNMENT_SHIFT_1 3
+#define FixupPrecode_ALIGNMENT_SHIFT_2 4
+
+ASMCONSTANTS_C_ASSERT(SIZEOF__FixupPrecode == sizeof(FixupPrecode));
+ASMCONSTANTS_C_ASSERT(Offset_PrecodeChunkIndex == offsetof(FixupPrecode, m_PrecodeChunkIndex));
+ASMCONSTANTS_C_ASSERT(Offset_MethodDescChunkIndex == offsetof(FixupPrecode, m_MethodDescChunkIndex));
+ASMCONSTANTS_C_ASSERT(MethodDesc_ALIGNMENT_SHIFT == MethodDesc::ALIGNMENT_SHIFT);
+ASMCONSTANTS_C_ASSERT((1<<FixupPrecode_ALIGNMENT_SHIFT_1) + (1<<FixupPrecode_ALIGNMENT_SHIFT_2) == sizeof(FixupPrecode));
+
#ifndef CROSSGEN_COMPILE
#define ResolveCacheElem__target 0x10
#define ResolveCacheElem__pNext 0x18
NESTED_END
; ------------------------------------------------------------------
-; ARM64TODO: Implement PrecodeFixupThunk when PreCode is Enabled
+; The call in the fixup precode initially points to this function.
+; The purpose of this function is to load the MethodDesc and forward the call to the prestub.
NESTED_ENTRY PrecodeFixupThunk
+
+ ; x12 = FixupPrecode *
+ ; On Exit
+ ; x12 = MethodDesc*
+ ; x13, x14 Trashed
+ ; Inline computation done by FixupPrecode::GetMethodDesc()
+ ldrb w13, [x12, #Offset_PrecodeChunkIndex] ; m_PrecodeChunkIndex
+ ldrb w14, [x12, #Offset_MethodDescChunkIndex] ; m_MethodDescChunkIndex
+
+ ; sizeof(FixupPrecode) == 24 == (1 << 3) + (1 << 4), so scale m_PrecodeChunkIndex in two adds
+ add x12,x12,w13,uxtw #FixupPrecode_ALIGNMENT_SHIFT_1
+ add x13,x12,w13,uxtw #FixupPrecode_ALIGNMENT_SHIFT_2
+ ; x13 = this + m_PrecodeChunkIndex * 24; the TADDR one precode past it is *GetBase(),
+ ; i.e. the MethodDesc base pointer stored just after the last precode of the chunk
+ ldr x13, [x13,#SIZEOF__FixupPrecode]
+ ; x12 = base + m_MethodDescChunkIndex * (1 << MethodDesc_ALIGNMENT_SHIFT) = MethodDesc*
+ add x12,x13,w14,uxtw #MethodDesc_ALIGNMENT_SHIFT
+
+ b ThePreStub
+
NESTED_END
; ------------------------------------------------------------------
#define USE_INDIRECT_CODEHEADER
+// Enable fixup precodes and their chunked layout now that FixupPrecode is implemented for this target.
-//#define HAS_FIXUP_PRECODE 1
-//#define HAS_FIXUP_PRECODE_CHUNKS 1
+#define HAS_FIXUP_PRECODE 1
+#define HAS_FIXUP_PRECODE_CHUNKS 1
// ThisPtrRetBufPrecode one is necessary for closed delegates over static methods with return buffer
#define HAS_THISPTR_RETBUF_PRECODE 1
struct FixupPrecode {
- static const int Type = 0xfc;
+ static const int Type = 0x0C;
- // mov r12, pc
- // ldr pc, [pc, #4] ; =m_pTarget
+ // adr x12, #0
+ // ldr x11, [pc, #12] ; =m_pTarget
+ // br x11
// dcb m_MethodDescChunkIndex
// dcb m_PrecodeChunkIndex
+ // 2 byte padding
// dcd m_pTarget
+ // Total: 12 bytes of code + 2 bytes padding + 2 index bytes + 8-byte target = 24 bytes
+ // (== SIZEOF__FixupPrecode); the index bytes land at offsets 14 and 15, matching the
+ // Offset_MethodDescChunkIndex / Offset_PrecodeChunkIndex asm constants.
- WORD m_rgCode[3];
+
+
+ UINT32 m_rgCode[3];
+ BYTE padding[2];
BYTE m_MethodDescChunkIndex;
BYTE m_PrecodeChunkIndex;
TADDR m_pTarget;
void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+ // Emit the fixed three-instruction stub (adr x12, #0 / ldr x11, =m_pTarget / br x11).
+ // Chunk indices and m_pTarget are set separately by Init / InitForSave / Fixup.
+ void InitCommon()
+ {
+ WRAPPER_NO_CONTRACT;
+ int n = 0;
+
+ m_rgCode[n++] = 0x1000000C; // adr x12, #0
+ m_rgCode[n++] = 0x5800006B; // ldr x11, [pc, #12] ; =m_pTarget
+
+ // The ldr above sits at offset 4 and reads pc+12 = struct offset 16, which must be &m_pTarget:
+ _ASSERTE((UINT32*)&m_pTarget == &m_rgCode[n + 2]);
+
+ m_rgCode[n++] = 0xD61F0160; // br x11
+
+ _ASSERTE(n == _countof(m_rgCode));
+ }
+ // Address of the TADDR slot located (m_PrecodeChunkIndex + 1) precodes past 'this',
+ // i.e. just after the last FixupPrecode of the chunk; it holds the MethodDesc base
+ // pointer that GetMethodDesc() dereferences.
TADDR GetBase()
{
- _ASSERTE(!"ARM64:NYI");
- return NULL;
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
}
TADDR GetMethodDesc();
+ // Returns m_pTarget, the branch target patched by Init / SetTargetInterlocked.
PCODE GetTarget()
{
- _ASSERTE(!"ARM64:NYI");
- return NULL;
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
}
+ // Atomically patch m_pTarget from 'expected' to 'target'; returns TRUE iff the
+ // compare-exchange observed 'expected' and the swap took effect. Only the data slot
+ // changes — the instruction stream itself is never rewritten here.
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
{
- _ASSERTE(!"ARM64:NYI");
- return NULL;
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_pTarget);
+ return (TADDR)InterlockedCompareExchange64(
+ (LONGLONG*)&m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
+ // Recognize a fixup precode by comparing against the three fixed instruction
+ // words emitted by InitCommon (adr x12 / ldr x11 / br x11).
static BOOL IsFixupPrecodeByASM(PCODE addr)
{
- _ASSERTE(!"ARM64:NYI");
- return NULL;
+ PTR_DWORD pInstr = dac_cast<PTR_DWORD>(PCODEToPINSTR(addr));
+ return
+ (pInstr[0] == 0x1000000C) &&
+ (pInstr[1] == 0x5800006B) &&
+ (pInstr[2] == 0xD61F0160);
}
#ifdef FEATURE_PREJIT
TADDR FixupPrecode::GetMethodDesc()
{
- _ASSERTE(!"ARM64:NYI");
- return NULL;
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This lookup is also manually inlined in PrecodeFixupThunk assembly code
+ TADDR base = *PTR_TADDR(GetBase());
+ // NOTE(review): base appears to be NULL only before the chunk-base slot is
+ // initialized (e.g. during NGen save, before FixupPrecode::Fixup runs) — confirm.
+ if (base == NULL)
+ return NULL;
+ return base + (m_MethodDescChunkIndex * MethodDesc::ALIGNMENT);
}
#ifdef DACCESS_COMPILE
+// Report the precode itself plus the chunk-base TADDR slot, so that
+// GetMethodDesc() can be evaluated from a DAC-captured dump.
void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
- _ASSERTE(!"ARM64:NYI");
+ SUPPORTS_DAC;
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FixupPrecode));
+
+ DacEnumMemoryRegion(GetBase(), sizeof(TADDR));
}
#endif // DACCESS_COMPILE
void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
{
- _ASSERTE(!"ARM64:NYI");
+ WRAPPER_NO_CONTRACT;
+
+ InitCommon();
+
+ // Initialize chunk indices only if they are not initialized yet. This is necessary to make MethodDesc::Reset work.
+ if (m_PrecodeChunkIndex == 0)
+ {
+ _ASSERTE(FitsInU1(iPrecodeChunkIndex));
+ m_PrecodeChunkIndex = static_cast<BYTE>(iPrecodeChunkIndex);
+ }
+
+ // iMethodDescChunkIndex == -1 means: leave the MethodDesc association untouched.
+ if (iMethodDescChunkIndex != -1)
+ {
+ if (m_MethodDescChunkIndex == 0)
+ {
+ _ASSERTE(FitsInU1(iMethodDescChunkIndex));
+ m_MethodDescChunkIndex = static_cast<BYTE>(iMethodDescChunkIndex);
+ }
+
+ // Store the base pointer such that base + index*ALIGNMENT recovers pMD (see GetMethodDesc).
+ if (*(void**)GetBase() == NULL)
+ *(void**)GetBase() = (BYTE*)pMD - (iMethodDescChunkIndex * MethodDesc::ALIGNMENT);
+ }
+
+ _ASSERTE(GetMethodDesc() == (TADDR)pMD);
+
+ // NOTE(review): pLoaderAllocator == NULL appears to be a "keep existing target" path — confirm.
+ if (pLoaderAllocator != NULL)
+ {
+ m_pTarget = GetEEFuncEntryPoint(PrecodeFixupThunk);
+ }
}
#ifdef FEATURE_NATIVE_IMAGE_GENERATION
// Partial initialization. Used to save regrouped chunks.
void FixupPrecode::InitForSave(int iPrecodeChunkIndex)
{
- _ASSERTE(!"ARM64:NYI");
+ STANDARD_VM_CONTRACT;
+
+ // Emit only the instruction stream and the precode chunk index here;
+ // m_MethodDescChunkIndex and m_pTarget are supplied later during image fixup.
+ InitCommon();
+
+ _ASSERTE(FitsInU1(iPrecodeChunkIndex));
+ m_PrecodeChunkIndex = static_cast<BYTE>(iPrecodeChunkIndex);
+ // The rest is initialized in code:FixupPrecode::Fixup
}
void FixupPrecode::Fixup(DataImage *image, MethodDesc * pMD)
{
- _ASSERTE(!"ARM64:NYI");
+ STANDARD_VM_CONTRACT;
+
+ // Note that GetMethodDesc() does not return the correct value because of
+ // regrouping of MethodDescs into hot and cold blocks. That's why the caller
+ // has to supply the actual MethodDesc
+
+ SSIZE_T mdChunkOffset;
+ ZapNode * pMDChunkNode = image->GetNodeForStructure(pMD, &mdChunkOffset);
+ ZapNode * pHelperThunk = image->GetHelperThunk(CORINFO_HELP_EE_PRECODE_FIXUP);
+
+ // m_pTarget points at the precode-fixup helper in the generated image.
+ image->FixupFieldToNode(this, offsetof(FixupPrecode, m_pTarget), pHelperThunk);
+
+ // Set the actual chunk index
+ FixupPrecode * pNewPrecode = (FixupPrecode *)image->GetImagePointer(this);
+
+ // Offset of pMD within its (regrouped) chunk, expressed in MethodDesc::ALIGNMENT units.
+ size_t mdOffset = mdChunkOffset - sizeof(MethodDescChunk);
+ size_t chunkIndex = mdOffset / MethodDesc::ALIGNMENT;
+ _ASSERTE(FitsInU1(chunkIndex));
+ pNewPrecode->m_MethodDescChunkIndex = (BYTE)chunkIndex;
+
+ // Fixup the base of MethodDescChunk
+ if (m_PrecodeChunkIndex == 0)
+ {
+ image->FixupFieldToNode(this, (BYTE *)GetBase() - (BYTE *)this,
+ pMDChunkNode, sizeof(MethodDescChunk));
+ }
}
#endif // FEATURE_NATIVE_IMAGE_GENERATION
{
PTR_DWORD pInstr = dac_cast<PTR_DWORD>(PCODEToPINSTR(pCode));
- // ARM64TODO: Check for FixupPrecode
+ // FixupPrecode: check whether the precode's target (directly, or through a jump stub) is PrecodeFixupThunk.
+#if defined(HAS_FIXUP_PRECODE)
+ if (FixupPrecode::IsFixupPrecodeByASM(pCode))
+ {
+ PCODE pTarget = dac_cast<PTR_FixupPrecode>(pInstr)->m_pTarget;
+
+ if (isJump(pTarget))
+ {
+ pTarget = decodeJump(pTarget);
+ }
+
+ return pTarget == (TADDR)PrecodeFixupThunk;
+ }
+#endif
// StubPrecode
if (pInstr[0] == 0x10000089 && // adr x9, #16
{
PCODE pTarget = dac_cast<PTR_StubPrecode>(pInstr)->m_pTarget;
- // ARM64TODO: implement for NGen case
+ if (isJump(pTarget))
+ {
+ pTarget = decodeJump(pTarget);
+ }
return pTarget == GetPreStubEntryPoint();
}
// SetTargetInterlocked does not modify code on ARM so the flush instruction cache is
// not necessary.
//
-#if !defined(_TARGET_ARM_)
+#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_)
if (ret) {
FlushInstructionCache(GetCurrentProcess(),this,SizeOf());
}