// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

//
// Zapping of relocations
//
// ======================================================================================

#include "common.h"

#include "zaprelocs.h"
void PDB_NoticeReloc(ZapRelocationType type, DWORD rvaReloc, ZapNode * pTarget, int targetOffset);
void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int targetOffset, ZapRelocationType type)
{
    _ASSERTE(pTarget != NULL);

    PBYTE pLocation = (PBYTE)pSrc + offset;
    DWORD rva = m_pImage->GetCurrentRVA() + offset;
    TADDR pActualTarget = (TADDR)m_pImage->GetBaseAddress() + pTarget->GetRVA() + targetOffset;

    PDB_NoticeReloc(type, rva, pTarget, targetOffset);

    switch (type)
    {
    case IMAGE_REL_BASED_ABSOLUTE:
        *(UNALIGNED DWORD *)pLocation = pTarget->GetRVA() + targetOffset;
        // IMAGE_REL_BASED_ABSOLUTE does not need base reloc entry
        return;

    case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
        _ASSERTE(targetOffset == 0);
        *(UNALIGNED DWORD *)pLocation = (DWORD)CORCOMPILE_TAG_TOKEN(pTarget->GetRVA());
        // IMAGE_REL_BASED_ABSOLUTE_TAGGED does not need base reloc entry
        return;

    case IMAGE_REL_BASED_PTR:
        // Misaligned relocs disable ASLR on ARM. We should never ever emit them.
        _ASSERTE(IS_ALIGNED(rva, sizeof(TADDR)));

        // An absolute address is stored here, so fall out of the switch to emit a base reloc entry below.
        *(UNALIGNED TADDR *)pLocation = pActualTarget;
        break;

    case IMAGE_REL_BASED_RELPTR:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
            *(UNALIGNED TADDR *)pLocation = (INT32)(pActualTarget - pSite);
        }
        // neither IMAGE_REL_BASED_RELPTR nor IMAGE_REL_BASED_MD_METHODENTRY need a base reloc entry
        return;

    case IMAGE_REL_BASED_RELPTR32:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
            *(UNALIGNED INT32 *)pLocation = (INT32)(pActualTarget - pSite);
        }
        // IMAGE_REL_BASED_RELPTR32 does not need base reloc entry
        return;

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
    case IMAGE_REL_BASED_REL32:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
            *(UNALIGNED INT32 *)pLocation = (INT32)(pActualTarget - (pSite + sizeof(INT32)));
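            // Worked example (illustrative values): with the site at RVA 0x1000 and the
            // target at RVA 0x2000, the stored rel32 is 0x2000 - (0x1000 + 4) = 0xFFC.
            // The displacement is measured from the end of the 4-byte slot, which is
            // the next instruction when rel32 is the last operand of a call/jmp.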
        }
        // IMAGE_REL_BASED_REL32 does not need base reloc entry
        return;
#endif // _TARGET_X86_ || _TARGET_AMD64_

#if defined(_TARGET_ARM_)
    case IMAGE_REL_BASED_THUMB_MOV32:
        // The movw/movt pair encodes an absolute address, so fall out of the switch
        // to emit a base reloc entry below.
        PutThumb2Mov32((UINT16 *)pLocation, (UINT32)pActualTarget);
        break;

    case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;

            // For details about how the value is calculated, see
            // description of IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL
            const UINT32 offsetCorrection = 12;

            UINT32 imm32 = pActualTarget - (pSite + offsetCorrection);
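            // A sketch of where the 12 comes from, assuming the usual
            // movw/movt/add-pc consumer sequence: the movw+movt pair occupies 8 bytes
            // and the Thumb PC reads 4 bytes ahead of the add, so the add observes
            // PC = pSite + 12; storing target - (pSite + 12) makes imm32 + PC yield
            // pActualTarget.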
            PutThumb2Mov32((UINT16 *)pLocation, imm32);

            // IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL does not need base reloc entry
            return;
        }

    case IMAGE_REL_BASED_THUMB_BRANCH24:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;

            // Kind of a workaround: make this reloc work both for calls (which have the
            // thumb bit set), and for relative jumps used for hot/cold splitting (which don't).
            pActualTarget &= ~THUMB_CODE;

            // Calculate the reloffset without the ThumbBit set so that it can be correctly encoded.
            _ASSERTE(!(pActualTarget & THUMB_CODE)); // we expect pActualTarget not to have the thumb bit set
            _ASSERTE(!(pSite & THUMB_CODE));         // we expect pSite not to have the thumb bit set

            INT32 relOffset = (INT32)(pActualTarget - (pSite + sizeof(INT32)));
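            // The +sizeof(INT32) is the Thumb PC bias: a 4-byte Thumb-2 BL at pSite
            // executes with PC = pSite + 4, so the encoded offset is relative to that.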
            if (!FitsInThumb2BlRel24(relOffset))
            {
                // Retry the compilation with IMAGE_REL_BASED_THUMB_BRANCH24 relocations disabled
                // (See code:ZapInfo::getRelocTypeHint)
                ThrowHR(COR_E_OVERFLOW);
            }

            PutThumb2BlRel24((UINT16 *)pLocation, relOffset);
        }
        // IMAGE_REL_BASED_THUMB_BRANCH24 does not need base reloc entry
        return;
#endif // _TARGET_ARM_

#if defined(_TARGET_ARM64_)
    case IMAGE_REL_ARM64_BRANCH26:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;

            INT32 relOffset = (INT32)(pActualTarget - pSite);
            if (!FitsInRel28(relOffset))
            {
                ThrowHR(COR_E_OVERFLOW);
            }
            PutArm64Rel28((UINT32 *)pLocation, relOffset);
        }
        // IMAGE_REL_ARM64_BRANCH26 does not need base reloc entry
        return;

    case IMAGE_REL_ARM64_PAGEBASE_REL21:
        {
            TADDR pSitePage = ((TADDR)m_pImage->GetBaseAddress() + rva) & 0xFFFFFFFFFFFFF000LL;
            TADDR pActualTargetPage = pActualTarget & 0xFFFFFFFFFFFFF000LL;

            INT64 relPage = (INT64)(pActualTargetPage - pSitePage);
            INT32 imm21 = (INT32)(relPage >> 12) & 0x1FFFFF;
            PutArm64Rel21((UINT32 *)pLocation, imm21);
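            // Worked example (illustrative addresses): a site at 0x140001008 sits in
            // page 0x140001000 and a target at 0x140003120 in page 0x140003000, so
            // relPage is 0x2000 and imm21 is 2, i.e. the ADRP steps forward two 4KB
            // pages. The low 12 bits of the target (0x120) are supplied by the paired
            // IMAGE_REL_ARM64_PAGEOFFSET_12A fixup below.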
        }
        return;

    case IMAGE_REL_ARM64_PAGEOFFSET_12A:
        {
            INT32 imm12 = (INT32)(pActualTarget & 0xFFFLL);
            PutArm64Rel12((UINT32 *)pLocation, imm12);
        }
        return;
#endif // _TARGET_ARM64_

    default:
        _ASSERTE(!"Unknown relocation type");
        break;
    }

    // The reloc types that fall through to here need an entry in the .reloc section
    DWORD page = AlignDown(rva, RELOCATION_PAGE_SIZE);

    if (page != m_page)
    {
        // Switching to a new page: flush the entries accumulated for the previous one
        FlushWriter();

        m_page = page;
        m_pageIndex = m_SerializedRelocs.GetCount();

        // Reserve space for IMAGE_BASE_RELOCATION
        for (size_t iSpace = 0; iSpace < sizeof(IMAGE_BASE_RELOCATION) / sizeof(USHORT); iSpace++)
            m_SerializedRelocs.Append(0);
    }

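    // Layout example (a sketch, assuming a 64-bit target where IMAGE_REL_BASED_PTR
    // serializes as IMAGE_REL_BASED_DIR64, type 0xA): pointer relocs at RVAs 0x4010
    // and 0x4018 yield a block with VirtualAddress = 0x4000, SizeOfBlock = 0x0C and
    // the two USHORT entries 0xA010 and 0xA018.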
    m_SerializedRelocs.Append((USHORT)(AlignmentTrim(rva, RELOCATION_PAGE_SIZE) | (type << 12)));
}

void ZapBaseRelocs::FlushWriter()
{
    if (m_SerializedRelocs.GetCount() != m_pageIndex)
    {
        // The blocks have to be 4-byte aligned
        if (m_SerializedRelocs.GetCount() & 1)
            m_SerializedRelocs.Append(0);
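        // The zero USHORT pad decodes as an IMAGE_REL_BASED_ABSOLUTE entry at
        // offset 0, which the OS loader treats as padding and skips.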
        // Backpatch the IMAGE_BASE_RELOCATION header reserved in WriteReloc
        IMAGE_BASE_RELOCATION * pBaseRelocation = (IMAGE_BASE_RELOCATION *)&(m_SerializedRelocs[m_pageIndex]);
        pBaseRelocation->VirtualAddress = m_page;
        pBaseRelocation->SizeOfBlock = (m_SerializedRelocs.GetCount() - m_pageIndex) * sizeof(USHORT);
    }
}

void ZapBaseRelocs::Save(ZapWriter * pZapWriter)
{
    // Flush the block for the last page
    FlushWriter();

    pZapWriter->SetWritingRelocs();

    // Write the relocs as blob
    pZapWriter->Write(&m_SerializedRelocs[0], m_SerializedRelocs.GetCount() * sizeof(USHORT));
}

//////////////////////////////////////////////////////////////////////////////
int _cdecl CmpZapRelocs(const void *p1, const void *p2)
{
    LIMITED_METHOD_CONTRACT;

    const ZapReloc *relocTemp1 = (ZapReloc *)p1;
    const ZapReloc *relocTemp2 = (ZapReloc *)p2;
    if (relocTemp1->m_offset < relocTemp2->m_offset)
        return -1;
    else if (relocTemp1->m_offset > relocTemp2->m_offset)
        return 1;
    else
        return 0;
}

void ZapBlobWithRelocs::Save(ZapWriter * pZapWriter)
{
    if (m_pRelocs != NULL)
    {
        // Pre-pass to figure out whether we need to sort. If the offsets are not in
        // ascending order and they end up describing locations in different pages,
        // the relocation writer generates bad relocation info (e.g. multiple entries
        // for the same page) that is no longer accepted by the OS loader.
        // Also, having relocs in ascending order allows a more compact representation.
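        // For example, offsets { 0x1008, 0x0FF0 } would emit a block for page 0x1000
        // followed by a second block for page 0x0000, out of ascending page order.
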
        ZapReloc *pReloc = m_pRelocs;

        // we only need to check if there is more than one reloc entry
        if (pReloc->m_type != IMAGE_REL_INVALID && pReloc[1].m_type != IMAGE_REL_INVALID)
        {
            bool isSorted = true;
            DWORD lastOffset = pReloc->m_offset;
            COUNT_T cReloc = 1;

            // we start with the second entry (the first entry is already consumed)
            while (pReloc[cReloc].m_type != IMAGE_REL_INVALID)
            {
                // we cannot abort the loop early because we need to count the entries
                // to properly sort the relocs
                if (pReloc[cReloc].m_offset < lastOffset)
                    isSorted = false;

                lastOffset = pReloc[cReloc].m_offset;
                cReloc++;
            }

            if (!isSorted)
                qsort(pReloc, cReloc, sizeof(ZapReloc), CmpZapRelocs);
        }

        ZapImage * pImage = ZapImage::GetImage(pZapWriter);
        PBYTE pData = GetData();

        for (pReloc = m_pRelocs; pReloc->m_type != IMAGE_REL_INVALID; pReloc++)
        {
            PBYTE pLocation = pData + pReloc->m_offset;
            int targetOffset = 0;

            // Read back the addend stored at the reloc location when the blob was
            // laid out; WriteReloc below combines it with the final target address.
            switch (pReloc->m_type)
            {
            case IMAGE_REL_BASED_ABSOLUTE:
                targetOffset = *(UNALIGNED DWORD *)pLocation;
                break;

            case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
                // targetOffset stays 0; WriteReloc asserts that for this type
                break;

            case IMAGE_REL_BASED_PTR:
                targetOffset = (int)*(UNALIGNED TADDR *)pLocation;
                break;
            case IMAGE_REL_BASED_RELPTR:
                targetOffset = (int)*(UNALIGNED TADDR *)pLocation;
                break;

            case IMAGE_REL_BASED_RELPTR32:
                targetOffset = (int)*(UNALIGNED INT32 *)pLocation;
                break;

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
            case IMAGE_REL_BASED_REL32:
                targetOffset = *(UNALIGNED INT32 *)pLocation;
                break;
#endif // _TARGET_X86_ || _TARGET_AMD64_

#if defined(_TARGET_ARM_)
            case IMAGE_REL_BASED_THUMB_MOV32:
            case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
                targetOffset = (int)GetThumb2Mov32((UINT16 *)pLocation);
                break;

            case IMAGE_REL_BASED_THUMB_BRANCH24:
                targetOffset = GetThumb2BlRel24((UINT16 *)pLocation);
                break;
#endif // defined(_TARGET_ARM_)

#if defined(_TARGET_ARM64_)
            case IMAGE_REL_ARM64_BRANCH26:
                targetOffset = (int)GetArm64Rel28((UINT32 *)pLocation);
                break;

            case IMAGE_REL_ARM64_PAGEBASE_REL21:
                targetOffset = (int)GetArm64Rel21((UINT32 *)pLocation);
                break;

            case IMAGE_REL_ARM64_PAGEOFFSET_12A:
                targetOffset = (int)GetArm64Rel12((UINT32 *)pLocation);
                break;
#endif // defined(_TARGET_ARM64_)

            default:
                _ASSERTE(!"Unknown reloc type");
                break;
            }

            pImage->WriteReloc(pData, pReloc->m_offset,
                pReloc->m_pTargetNode, targetOffset, pReloc->m_type);
        }
    }

    ZapBlob::Save(pZapWriter);
}

COUNT_T ZapBlobWithRelocs::GetCountOfStraddlerRelocations(DWORD dwPos)
{
    if (m_pRelocs == NULL)
        return 0;

    // Straddlers can exist only if the node crosses a page boundary
    if (AlignDown(dwPos, RELOCATION_PAGE_SIZE) == AlignDown(dwPos + GetSize() - 1, RELOCATION_PAGE_SIZE))
        return 0;

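    // Example (assuming 4K relocation pages and 8-byte TADDRs): an IMAGE_REL_BASED_PTR
    // at page offset 0xFF9..0xFFF writes past the page boundary and is a straddler;
    // one at offset 0xFF8 still fits entirely within the page.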
    COUNT_T nStraddlers = 0;

    for (ZapReloc * pReloc = m_pRelocs; pReloc->m_type != IMAGE_REL_INVALID; pReloc++)
    {
        if (pReloc->m_type == IMAGE_REL_BASED_PTR)
        {
            if (AlignmentTrim(dwPos + pReloc->m_offset, RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - sizeof(TADDR))
                nStraddlers++;
        }
    }

    return nStraddlers;
}

ZapBlobWithRelocs * ZapBlobWithRelocs::NewBlob(ZapWriter * pWriter, PVOID pData, SIZE_T cbSize)
{
    S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapBlobWithRelocs)) + S_SIZE_T(cbSize);
    if (cbAllocSize.IsOverflow())
        ThrowHR(COR_E_OVERFLOW);

    void * pMemory = new (pWriter->GetHeap()) BYTE[cbAllocSize.Value()];

    ZapBlobWithRelocs * pZapBlobWithRelocs = new (pMemory) ZapBlobWithRelocs(cbSize);

    if (pData != NULL)
        memcpy((void*)(pZapBlobWithRelocs + 1), pData, cbSize);

    return pZapBlobWithRelocs;
}

template <DWORD alignment>
class ZapAlignedBlobWithRelocsConst : public ZapBlobWithRelocs
{
protected:
    ZapAlignedBlobWithRelocsConst(SIZE_T cbSize)
        : ZapBlobWithRelocs(cbSize)
    {
    }

public:
    virtual UINT GetAlignment()
    {
        return alignment;
    }

    static ZapBlobWithRelocs * NewBlob(ZapWriter * pWriter, PVOID pData, SIZE_T cbSize)
    {
        S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapAlignedBlobWithRelocsConst<alignment>)) + S_SIZE_T(cbSize);
        if (cbAllocSize.IsOverflow())
            ThrowHR(COR_E_OVERFLOW);

        void * pMemory = new (pWriter->GetHeap()) BYTE[cbAllocSize.Value()];

        ZapAlignedBlobWithRelocsConst<alignment> * pZapBlob = new (pMemory) ZapAlignedBlobWithRelocsConst<alignment>(cbSize);

        if (pData != NULL)
            memcpy((void*)(pZapBlob + 1), pData, cbSize);

        return pZapBlob;
    }
};

ZapBlobWithRelocs * ZapBlobWithRelocs::NewAlignedBlob(ZapWriter * pWriter, PVOID pData, SIZE_T cbSize, SIZE_T cbAlignment)
{
    switch (cbAlignment)
    {
    case 1:
        return ZapBlobWithRelocs::NewBlob(pWriter, pData, cbSize);
    case 2:
        return ZapAlignedBlobWithRelocsConst<2>::NewBlob(pWriter, pData, cbSize);
    case 4:
        return ZapAlignedBlobWithRelocsConst<4>::NewBlob(pWriter, pData, cbSize);
    case 8:
        return ZapAlignedBlobWithRelocsConst<8>::NewBlob(pWriter, pData, cbSize);
    case 16:
        return ZapAlignedBlobWithRelocsConst<16>::NewBlob(pWriter, pData, cbSize);

    default:
        _ASSERTE(!"Requested alignment not supported");
        return NULL;
    }
}
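
// A typical call (illustrative; pWriter/pBytes/cbBytes are whatever the caller has
// at hand):
//
//   ZapBlobWithRelocs * pBlob =
//       ZapBlobWithRelocs::NewAlignedBlob(pWriter, pBytes, cbBytes, sizeof(TADDR));
//
// sizeof(TADDR) is 4 or 8, so the request maps to one of the template
// instantiations above.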