#define IMAGE_REL_BASED_REL32 0x10
#define IMAGE_REL_BASED_THUMB_BRANCH24 0x13
+// This identifier marks an ARM32-specific PC-relative address
+// computation, which corresponds to the following instruction
+// sequence:
+// l0: movw rX, #imm_lo // 4 bytes
+// l4: movt rX, #imm_hi // 4 bytes
+// l8: add  rX, pc      // after this instruction rX = relocTarget
+//
+// The program counter at l8 is the address of l8 + 4.
+// The address of the relocated movw/movt pair is l0.
+// So imm should be calculated as follows:
+// imm = relocTarget - (l8 + 4) = relocTarget - (l0 + 8 + 4) = relocTarget - (l0 + 12)
+// Hence the offset correction value is 12.
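+// For example (with purely illustrative addresses): if l0 = 0x1000 and
+// relocTarget = 0x2010, then imm = 0x2010 - (0x1000 + 12) = 0x1004.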
+//
+#define IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL 0x14
+
#endif // _COR_INFO_H_
CORJIT_FLAG_DESKTOP_QUIRKS = 38, // The JIT should generate desktop-quirk-compatible code
CORJIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible
CORJIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
+
+#if defined(_TARGET_ARM_)
+ CORJIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
+#else // !defined(_TARGET_ARM_)
+ CORJIT_FLAG_UNUSED11 = 41
+#endif // !defined(_TARGET_ARM_)
};
CORJIT_FLAGS()
bool m_fNoMetaData; // Do not copy metadata and IL to native image
+ void SetCompilerFlags(void);
+
ZapperOptions();
~ZapperOptions();
};
/* IN OUT */ bool* pUnwindStarted,
bool jmpEpilog);
+ void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
+ void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
+ void genMov32RelocatableImmediate(emitAttr size, unsigned value, regNumber reg);
+
bool genUsedPopToReturn; // True if we use the pop into PC to return,
// False if we didn't and must branch to LR to return.
// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+ genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
// Jump to the finally BB
inst_JMP(EJ_jmp, block->bbJumpDest);
// genEHCatchRet:
void CodeGen::genEHCatchRet(BasicBlock* block)
{
- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_INTRET);
- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_INTRET);
+ genMov32RelocatableDisplacement(block->bbJumpDest, REG_INTRET);
}
//------------------------------------------------------------------------
if (EA_IS_RELOC(size))
{
- getEmitter()->emitIns_R_I(INS_movw, size, reg, imm);
- getEmitter()->emitIns_R_I(INS_movt, size, reg, imm);
+ genMov32RelocatableImmediate(size, imm, reg);
}
else if (imm == 0)
{
getEmitter()->emitDataGenEnd();
- getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, jmpTabBase, treeNode->gtRegNum);
- getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, jmpTabBase, treeNode->gtRegNum);
+ genMov32RelocatableDataLabel(jmpTabBase, treeNode->gtRegNum);
genProduceReg(treeNode);
}
compiler->unwindAllocStack(frameSize);
}
+/*-----------------------------------------------------------------------------
+ *
+ * Move a relocatable displacement value to a register
+ */
+void CodeGen::genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg)
+{
+ getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block, reg);
+ getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block, reg);
+
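+    // When relative code relocs are enabled, the movw/movt pair holds a PC-relative
+    // offset (see IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL), so add PC to form the final address.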
+ if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
+ {
+ getEmitter()->emitIns_R_R_R(INS_add, EA_4BYTE_DSP_RELOC, reg, reg, REG_PC);
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ *
+ * Move a relocatable data-label address to a register
+ */
+void CodeGen::genMov32RelocatableDataLabel(unsigned value, regNumber reg)
+{
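+    // Used for data-section labels, e.g. the base address of a switch jump table (jmpTabBase).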
+ getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, value, reg);
+ getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, value, reg);
+
+ if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
+ {
+ getEmitter()->emitIns_R_R_R(INS_add, EA_HANDLE_CNS_RELOC, reg, reg, REG_PC);
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ *
+ * Move a relocatable immediate to a register
+ */
+void CodeGen::genMov32RelocatableImmediate(emitAttr size, unsigned value, regNumber reg)
+{
+ _ASSERTE(EA_IS_RELOC(size));
+
+ getEmitter()->emitIns_R_I(INS_movw, size, reg, value);
+ getEmitter()->emitIns_R_I(INS_movt, size, reg, value);
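+    // For reloc-sized immediates the emitter records the matching relocation type
+    // (absolute or PC-relative) when it outputs the movt; see emitHandlePCRelativeMov32.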
+
+ if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
+ {
+ getEmitter()->emitIns_R_R_R(INS_add, size, reg, reg, REG_PC);
+ }
+}
+
/*-----------------------------------------------------------------------------
*
* Returns register mask to push/pop to allocate a small stack frame,
// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do
// the return.
- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+ genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
regTracker.rsTrackRegTrash(REG_LR);
#endif // 0
case BBJ_EHCATCHRET:
// set r0 to the address the VM should return to after the catch
- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
+ genMov32RelocatableDisplacement(block->bbJumpDest, REG_R0);
regTracker.rsTrackRegTrash(REG_R0);
__fallthrough;
// Pick any register except the index register.
//
regNumber regTabBase = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
- getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, jmpTabBase, regTabBase);
- getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, jmpTabBase, regTabBase);
+ genMov32RelocatableDataLabel(jmpTabBase, regTabBase);
regTracker.rsTrackRegTrash(regTabBase);
// LDR PC, [regTableBase + reg * 4] (encoded as LDR PC, [regTableBase, reg, LSL 2]
#endif // defined(LATE_DISASM)
}
+#ifdef _TARGET_ARM_
+/*****************************************************************************
+ * A helper for handling a Thumb Mov32 of a position-independent (PC-relative) value.
+ *
+ * Depending on whether absolute or relative relocations are used for the code
+ * section, this routine records either an absolute Thumb Mov32 relocation with
+ * the EE, or a PC-relative one whose offset fixup is resolved during compilation
+ * and does not require a base relocation entry.
+ */
+void emitter::emitHandlePCRelativeMov32(void* location, /* IN */
+ void* target) /* IN */
+{
+ if (emitComp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
+ {
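+        // Relative code relocs: the fixup is PC-relative and is resolved at compile time,
+        // so no base relocation entry is needed for this location.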
+ emitRecordRelocation(location, target, IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL);
+ }
+ else
+ {
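+        // Absolute relocs: record a standard Thumb Mov32 relocation with the EE.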
+ emitRecordRelocation(location, target, IMAGE_REL_BASED_THUMB_MOV32);
+ }
+}
+#endif // _TARGET_ARM_
+
/*****************************************************************************
* A helper for recording a call site with the EE.
*/
WORD slotNum = 0, /* IN */
INT32 addlDelta = 0); /* IN */
+#ifdef _TARGET_ARM_
+ void emitHandlePCRelativeMov32(void* location, /* IN */
+ void* target); /* IN */
+#endif
+
void emitRecordCallSite(ULONG instrOffset, /* IN */
CORINFO_SIG_INFO* callSig, /* IN */
CORINFO_METHOD_HANDLE methodHandle); /* IN */
{
assert(ins == INS_movt || ins == INS_movw);
if ((ins == INS_movt) && emitComp->info.compMatchedVM)
- emitRecordRelocation((void*)(dst - 8), (void*)distVal, IMAGE_REL_BASED_THUMB_MOV32);
+ emitHandlePCRelativeMov32((void*)(dst - 8), (void*)distVal);
}
}
else
assert((ins == INS_movt) || (ins == INS_movw));
dst += emitOutput_Thumb2Instr(dst, code);
if ((ins == INS_movt) && emitComp->info.compMatchedVM)
- emitRecordRelocation((void*)(dst - 8), (void*)imm, IMAGE_REL_BASED_THUMB_MOV32);
+ emitHandlePCRelativeMov32((void*)(dst - 8), (void*)imm);
}
else
{
if (EA_IS_RELOC(size))
{
- getEmitter()->emitIns_R_I(INS_movw, size, reg, imm);
- getEmitter()->emitIns_R_I(INS_movt, size, reg, imm);
+ genMov32RelocatableImmediate(size, imm, reg);
}
else if (arm_Valid_Imm_For_Mov(imm))
{
JIT_FLAG_DESKTOP_QUIRKS = 38, // The JIT should generate desktop-quirk-compatible code
JIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible
JIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
+
+#if defined(_TARGET_ARM_)
+ JIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
+#else // !defined(_TARGET_ARM_)
+ JIT_FLAG_UNUSED11 = 41
+#endif // !defined(_TARGET_ARM_)
};
// clang-format on
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER0, JIT_FLAG_TIER0);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER1, JIT_FLAG_TIER1);
+#if defined(_TARGET_ARM_)
+
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS, JIT_FLAG_RELATIVE_CODE_RELOCS);
+
+#endif // _TARGET_ARM_
+
#undef FLAGS_EQUAL
}
#if defined(_TARGET_ARM_)
case IMAGE_REL_BASED_THUMB_MOV32:
+ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
case IMAGE_REL_BASED_THUMB_BRANCH24:
+
+# ifdef _DEBUG
+ {
+ CORJIT_FLAGS jitFlags = m_zapper->m_pOpt->m_compilerFlags;
+
+ if (jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS))
+ {
+ _ASSERTE(fRelocType == IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL
+ || fRelocType == IMAGE_REL_BASED_THUMB_BRANCH24);
+ }
+ else
+ {
+ _ASSERTE(fRelocType == IMAGE_REL_BASED_THUMB_MOV32
+ || fRelocType == IMAGE_REL_BASED_THUMB_BRANCH24);
+ }
+ }
+# endif // _DEBUG
break;
#endif
#if defined(_TARGET_ARM_)
case IMAGE_REL_BASED_THUMB_MOV32:
+ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
PutThumb2Mov32((UINT16 *)location, targetOffset);
break;
m_legacyMode(false)
,m_fNoMetaData(s_fNGenNoMetaData)
{
- m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC);
- m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT);
+ SetCompilerFlags();
m_zapSet = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapSet);
if (m_zapSet != NULL && wcslen(m_zapSet) > 3)
delete [] m_repositoryDir;
}
+void ZapperOptions::SetCompilerFlags(void)
+{
+ m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC);
+ m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT);
+
+#if defined(_TARGET_ARM_)
+# if defined(PLATFORM_UNIX)
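+    // On ARM Unix, have the JIT emit PC-relative address computations instead of
+    // absolute Mov32 relocations that would require EE relocation records.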
+ m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS);
+# endif // defined(PLATFORM_UNIX)
+#endif // defined(_TARGET_ARM_)
+}
+
/* --------------------------------------------------------------------------- *
* Zapper class
* --------------------------------------------------------------------------- */
pOptions = &currentVersionOptions;
zo->m_compilerFlags.Reset();
- zo->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC);
- zo->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT);
+ zo->SetCompilerFlags();
zo->m_autodebug = true;
if (pOptions->fDebug)
break;
}
+ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
+ {
+ TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
+
+ // For details about how the value is calculated, see
+ // description of IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL
+ const UINT32 offsetCorrection = 12;
+
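+            // The movw/movt pair receives the displacement from the PC of the trailing
+            // 'add rX, pc' instruction rather than the absolute target address.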
+ UINT32 imm32 = pActualTarget - (pSite + offsetCorrection);
+
+ PutThumb2Mov32((UINT16 *)pLocation, imm32);
+
+            // IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL does not need a base reloc entry
+ return;
+ }
+
case IMAGE_REL_BASED_THUMB_BRANCH24:
{
TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
#if defined(_TARGET_ARM_)
case IMAGE_REL_BASED_THUMB_MOV32:
+ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
targetOffset = (int)GetThumb2Mov32((UINT16 *)pLocation);
break;