// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#include "dataimage.h"
// Include Zapper infrastructure here
// dataimage.cpp is the only place where Zapper infrastructure should be used directly in the VM.
// The rest of the VM should never use Zapper infrastructure directly, for the sake of good layering.
// The long-term goal is to move all NGen-specific parts like the Save and Fixup methods out of the VM,
// and remove dataimage.cpp completely.
#include "../zap/zapwriter.h"
#include "../zap/zapimage.h"
#include "../zap/zapimport.h"
#include "inlinetracking.h"
#define NodeTypeForItemKind(kind) ((ZapNodeType)(ZapNodeType_StoredStructure + kind))
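// For example, NodeTypeForItemKind(DataImage::ITEM_MODULE) yields a distinct ZapNodeType for
// module structures, which GetSectionForNodeType() below maps back to CORCOMPILE_SECTION_MODULE.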
class ZapStoredStructure : public ZapNode
    ZapStoredStructure(DWORD dwSize, BYTE kind, BYTE align)
        : m_dwSize(dwSize), m_kind(kind), m_align(align)
    DataImage::ItemKind GetKind()
        return (DataImage::ItemKind)m_kind;
    virtual DWORD GetSize()
    virtual UINT GetAlignment()
    virtual ZapNodeType GetType()
        return NodeTypeForItemKind(m_kind);
    virtual void Save(ZapWriter * pZapWriter);
inline ZapStoredStructure * AsStoredStructure(ZapNode * pNode)
    // Verify that it is one of the StoredStructure subtypes
    _ASSERTE(pNode->GetType() >= ZapNodeType_StoredStructure);
    return (ZapStoredStructure *)pNode;
struct InternedStructureKey
    InternedStructureKey(const void * data, DWORD dwSize, DataImage::ItemKind kind)
        : m_data(data), m_dwSize(dwSize), m_kind(kind)
    DataImage::ItemKind m_kind;
class InternedStructureTraits : public NoRemoveSHashTraits< DefaultSHashTraits<ZapStoredStructure *> >
    typedef InternedStructureKey key_t;
    static key_t GetKey(element_t e)
        LIMITED_METHOD_CONTRACT;
        return InternedStructureKey(e->GetData(), e->GetSize(), e->GetKind());
    static BOOL Equals(key_t k1, key_t k2)
        LIMITED_METHOD_CONTRACT;
        return (k1.m_dwSize == k2.m_dwSize) &&
               (k1.m_kind == k2.m_kind) &&
               memcmp(k1.m_data, k2.m_data, k1.m_dwSize) == 0;
    static count_t Hash(key_t k)
        LIMITED_METHOD_CONTRACT;
        return (count_t)k.m_dwSize ^ (count_t)k.m_kind ^ HashBytes((BYTE *)k.m_data, k.m_dwSize);
    static const element_t Null() { LIMITED_METHOD_CONTRACT; return NULL; }
    static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; }
DataImage::DataImage(Module *module, CEEPreloader *preloader)
    m_preloader(preloader),
    m_iCurrentFixup(0), // Dev11 bug 181494 instrumentation
    m_pInternedStructures(NULL),
    m_pCurrentAssociatedMethodTable(NULL)
    m_pZapImage = m_preloader->GetDataStore()->GetZapImage();
    m_pZapImage->m_pDataImage = this;
    m_pInternedStructures = new InternedStructureHashTable();
    m_inlineTrackingMap = new InlineTrackingMap();
DataImage::~DataImage()
    delete m_pInternedStructures;
    delete m_inlineTrackingMap;
void DataImage::PreSave()
#ifndef ZAP_HASHTABLE_TUNING
void DataImage::PostSave()
#ifdef ZAP_HASHTABLE_TUNING
    // If ZAP_HASHTABLE_TUNING is defined, Preallocate is overloaded to print the tuning constants
DWORD DataImage::GetMethodProfilingFlags(MethodDesc * pMD)
    STANDARD_VM_CONTRACT;
    // We are not differentiating unboxing stubs vs. normal method descs in IBC data yet
    if (pMD->IsUnboxingStub())
        pMD = pMD->GetWrappedMethodDesc();
    const MethodProfilingData * pData = m_methodProfilingData.LookupPtr(pMD);
    return (pData != NULL) ? pData->flags : 0;
void DataImage::SetMethodProfilingFlags(MethodDesc * pMD, DWORD flags)
    STANDARD_VM_CONTRACT;
    const MethodProfilingData * pData = m_methodProfilingData.LookupPtr(pMD);
        const_cast<MethodProfilingData *>(pData)->flags |= flags;
    MethodProfilingData data;
    m_methodProfilingData.Add(data);
void DataImage::Preallocate()
    STANDARD_VM_CONTRACT;
    // TODO: Move to ZapImage
    PEDecoder pe((void *)m_module->GetFile()->GetManagedFileContents());
    COUNT_T cbILImage = pe.GetSize();
    // Curb the estimate to handle corner cases gracefully
    cbILImage = min(cbILImage, 50000000);
    PREALLOCATE_HASHTABLE(DataImage::m_structures, 0.019, cbILImage);
    PREALLOCATE_ARRAY(DataImage::m_structuresInOrder, 0.0088, cbILImage);
    PREALLOCATE_ARRAY(DataImage::m_Fixups, 0.046, cbILImage);
    PREALLOCATE_HASHTABLE(DataImage::m_surrogates, 0.0025, cbILImage);
    PREALLOCATE_HASHTABLE((*DataImage::m_pInternedStructures), 0.0007, cbILImage);
ZapHeap * DataImage::GetHeap()
    LIMITED_METHOD_CONTRACT;
    return m_pZapImage->GetHeap();
void DataImage::AddStructureInOrder(ZapNode *pNode, BOOL fMaintainSaveOrder /*=FALSE*/)
    SavedNodeEntry entry;
    entry.dwAssociatedOrder = 0;
    if (fMaintainSaveOrder)
        entry.dwAssociatedOrder = MAINTAIN_SAVE_ORDER;
    else if (m_pCurrentAssociatedMethodTable)
        TypeHandle th = TypeHandle(m_pCurrentAssociatedMethodTable);
        entry.dwAssociatedOrder = m_pZapImage->LookupClassLayoutOrder(CORINFO_CLASS_HANDLE(th.AsPtr()));
    m_structuresInOrder.Append(entry);
ZapStoredStructure * DataImage::StoreStructureHelper(const void *data, SIZE_T size,
                                                     DataImage::ItemKind kind,
                                                     BOOL fMaintainSaveOrder)
    STANDARD_VM_CONTRACT;
    S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapStoredStructure)) + S_SIZE_T(size);
    if (cbAllocSize.IsOverflow())
        ThrowHR(COR_E_OVERFLOW);
    void * pMemory = new (GetHeap()) BYTE[cbAllocSize.Value()];
    // PE files cannot be larger than 4 GB
    if (DWORD(size) != size)
        ThrowHR(E_UNEXPECTED);
    ZapStoredStructure * pStructure = new (pMemory) ZapStoredStructure((DWORD)size, static_cast<BYTE>(kind), static_cast<BYTE>(align));
    CopyMemory(pStructure->GetData(), data, size);
    BindPointer(data, pStructure, 0);
    m_pLastLookup = NULL;
    AddStructureInOrder(pStructure, fMaintainSaveOrder);
// Bind pointer to the relative offset in ZapNode
void DataImage::BindPointer(const void *p, ZapNode * pNode, SSIZE_T offset)
    STANDARD_VM_CONTRACT;
    _ASSERTE(m_structures.LookupPtr(p) == NULL);
    m_pLastLookup = NULL;
void DataImage::CopyData(ZapStoredStructure * pNode, const void * p, ULONG size)
    memcpy(pNode->GetData(), p, size);
void DataImage::CopyDataToOffset(ZapStoredStructure * pNode, ULONG offset, const void * p, ULONG size)
    SIZE_T target = (SIZE_T) (pNode->GetData());
    memcpy((void *) target, p, size);
void DataImage::PlaceStructureForAddress(const void * data, CorCompileSection section)
    STANDARD_VM_CONTRACT;
    const StructureEntry * pEntry = m_structures.LookupPtr(data);
    ZapNode * pNode = pEntry->pNode;
    if (!pNode->IsPlaced())
        ZapVirtualSection * pSection = m_pZapImage->GetSection(section);
        pSection->Place(pNode);
void DataImage::PlaceInternedStructureForAddress(const void * data, CorCompileSection sectionIfReused, CorCompileSection sectionIfSingleton)
    STANDARD_VM_CONTRACT;
    const StructureEntry * pEntry = m_structures.LookupPtr(data);
    ZapNode * pNode = pEntry->pNode;
    if (!pNode->IsPlaced())
        CorCompileSection section = m_reusedStructures.Contains(pNode) ? sectionIfReused : sectionIfSingleton;
        ZapVirtualSection * pSection = m_pZapImage->GetSection(section);
        pSection->Place(pNode);
void DataImage::FixupPointerField(PVOID p, SSIZE_T offset)
    STANDARD_VM_CONTRACT;
    PVOID pTarget = *(PVOID UNALIGNED *)((BYTE *)p + offset);
        ZeroPointerField(p, offset);
    FixupField(p, offset, pTarget);
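// Illustrative usage sketch (hypothetical type, not part of this file): a typical Save method first
// stores the structure and then registers a fixup for each pointer field, e.g.:
//
//   image->StoreStructure(this, sizeof(MyStruct), DataImage::ITEM_MODULE);
//   image->FixupPointerField(this, offsetof(MyStruct, m_pNext));
//
// FixupPointerField reads the current in-memory pointer value, zeroes the stored field when the
// target is NULL, and otherwise records a relocation against the saved copy via FixupField.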
void DataImage::FixupRelativePointerField(PVOID p, SSIZE_T offset)
    STANDARD_VM_CONTRACT;
    PVOID pTarget = RelativePointer<PTR_VOID>::GetValueMaybeNullAtPtr((TADDR)p + offset);
        ZeroPointerField(p, offset);
    FixupField(p, offset, pTarget, 0, IMAGE_REL_BASED_RELPTR);
static void EncodeTargetOffset(PVOID pLocation, SSIZE_T targetOffset, ZapRelocationType type)
    // Store the targetOffset into the location of the reloc temporarily
    case IMAGE_REL_BASED_PTR:
    case IMAGE_REL_BASED_RELPTR:
        *(UNALIGNED TADDR *)pLocation = (TADDR)targetOffset;
    case IMAGE_REL_BASED_ABSOLUTE:
        *(UNALIGNED DWORD *)pLocation = (DWORD)targetOffset;
    case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
        _ASSERTE(targetOffset == 0);
        *(UNALIGNED TADDR *)pLocation = 0;
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
    case IMAGE_REL_BASED_REL32:
        *(UNALIGNED INT32 *)pLocation = (INT32)targetOffset;
#endif // _TARGET_X86_ || _TARGET_AMD64_
static SSIZE_T DecodeTargetOffset(PVOID pLocation, ZapRelocationType type)
    // Read back the targetOffset that EncodeTargetOffset temporarily stored in the location of the reloc
    case IMAGE_REL_BASED_PTR:
    case IMAGE_REL_BASED_RELPTR:
        return (SSIZE_T)*(UNALIGNED TADDR *)pLocation;
    case IMAGE_REL_BASED_ABSOLUTE:
        return *(UNALIGNED DWORD *)pLocation;
    case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
        _ASSERTE(*(UNALIGNED TADDR *)pLocation == 0);
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
    case IMAGE_REL_BASED_REL32:
        return *(UNALIGNED INT32 *)pLocation;
#endif // _TARGET_X86_ || _TARGET_AMD64_
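// Illustrative round trip: for a given reloc type, DecodeTargetOffset(pLocation, type) returns the
// value a prior EncodeTargetOffset(pLocation, offset, type) stashed there, e.g.:
//
//   TADDR slot;
//   EncodeTargetOffset(&slot, 0x40, IMAGE_REL_BASED_PTR);
//   _ASSERTE(DecodeTargetOffset(&slot, IMAGE_REL_BASED_PTR) == 0x40);
//
// The offset lives in the field only until ZapStoredStructure::Save() converts it into a real reloc.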
void DataImage::FixupField(PVOID p, SSIZE_T offset, PVOID pTarget, SSIZE_T targetOffset, ZapRelocationType type)
    STANDARD_VM_CONTRACT;
    m_iCurrentFixup++; // Dev11 bug 181494 instrumentation
    const StructureEntry * pEntry = m_pLastLookup;
    if (pEntry == NULL || pEntry->ptr != p)
        pEntry = m_structures.LookupPtr(p);
        _ASSERTE(pEntry != NULL &&
            "StoreStructure or BindPointer must be called on all saved data.");
        m_pLastLookup = pEntry;
    offset += pEntry->offset;
    _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize());
    const StructureEntry * pTargetEntry = m_pLastLookup;
    if (pTargetEntry == NULL || pTargetEntry->ptr != pTarget)
        pTargetEntry = m_structures.LookupPtr(pTarget);
        _ASSERTE(pTargetEntry != NULL &&
            "The target of the fixup is not saved into the image");
    targetOffset += pTargetEntry->offset;
    _ASSERTE(0 <= targetOffset && (DWORD)targetOffset <= pTargetEntry->pNode->GetSize());
    entry.m_offset = (DWORD)offset;
    entry.m_pLocation = AsStoredStructure(pEntry->pNode);
    entry.m_pTargetNode = pTargetEntry->pNode;
    EncodeTargetOffset((BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset, targetOffset, type);
void DataImage::FixupFieldToNode(PVOID p, SSIZE_T offset, ZapNode * pTarget, SSIZE_T targetOffset, ZapRelocationType type)
    STANDARD_VM_CONTRACT;
    m_iCurrentFixup++; // Dev11 bug 181494 instrumentation
    const StructureEntry * pEntry = m_pLastLookup;
    if (pEntry == NULL || pEntry->ptr != p)
        pEntry = m_structures.LookupPtr(p);
        _ASSERTE(pEntry != NULL &&
            "StoreStructure or BindPointer must be called on all saved data.");
        m_pLastLookup = pEntry;
    offset += pEntry->offset;
    _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize());
    _ASSERTE(pTarget != NULL);
    entry.m_offset = (DWORD)offset;
    entry.m_pLocation = AsStoredStructure(pEntry->pNode);
    entry.m_pTargetNode = pTarget;
    EncodeTargetOffset((BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset, targetOffset, type);
DWORD DataImage::GetRVA(const void *data)
    STANDARD_VM_CONTRACT;
    const StructureEntry * pEntry = m_structures.LookupPtr(data);
    _ASSERTE(pEntry != NULL);
    return pEntry->pNode->GetRVA() + (DWORD)pEntry->offset;
void DataImage::ZeroField(PVOID p, SSIZE_T offset, SIZE_T size)
    STANDARD_VM_CONTRACT;
    ZeroMemory(GetImagePointer(p, offset), size);
void * DataImage::GetImagePointer(ZapStoredStructure * pNode)
    return pNode->GetData();
void * DataImage::GetImagePointer(PVOID p, SSIZE_T offset)
    STANDARD_VM_CONTRACT;
    const StructureEntry * pEntry = m_pLastLookup;
    if (pEntry == NULL || pEntry->ptr != p)
        pEntry = m_structures.LookupPtr(p);
        _ASSERTE(pEntry != NULL &&
            "StoreStructure or BindPointer must be called on all saved data.");
        m_pLastLookup = pEntry;
    offset += pEntry->offset;
    _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize());
    return (BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset;
ZapNode * DataImage::GetNodeForStructure(PVOID p, SSIZE_T * pOffset)
    const StructureEntry * pEntry = m_pLastLookup;
    if (pEntry == NULL || pEntry->ptr != p)
        pEntry = m_structures.LookupPtr(p);
        _ASSERTE(pEntry != NULL &&
            "StoreStructure or BindPointer must be called on all saved data.");
    *pOffset = pEntry->offset;
    return pEntry->pNode;
ZapStoredStructure * DataImage::StoreInternedStructure(const void *data, ULONG size,
                                                       DataImage::ItemKind kind,
    STANDARD_VM_CONTRACT;
    ZapStoredStructure * pStructure = m_pInternedStructures->Lookup(InternedStructureKey(data, size, kind));
    if (pStructure != NULL)
        // Just add a new mapping to the already-interned structure
        BindPointer(data, pStructure, 0);
        // Track that this structure has been successfully reused by interning
        NoteReusedStructure(data);
        // We have not seen this structure yet. Create a new one.
        pStructure = StoreStructure(data, size, kind);
        m_pInternedStructures->Add(pStructure);
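// Illustrative sketch: two saved structures with byte-identical payloads of the same kind end up
// sharing one ZapStoredStructure, e.g.:
//
//   ZapStoredStructure * pA = image->StoreInternedStructure(blob1, cb, DataImage::ITEM_GUID_INFO);
//   ZapStoredStructure * pB = image->StoreInternedStructure(blob2, cb, DataImage::ITEM_GUID_INFO);
//   // if memcmp(blob1, blob2, cb) == 0, then pA == pB and blob2 is recorded as a reuse
//
// (blob1/blob2 are hypothetical buffers; ITEM_GUID_INFO is just one plausible interned kind.)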
void DataImage::NoteReusedStructure(const void *data)
    STANDARD_VM_CONTRACT;
    _ASSERTE(IsStored(data));
    const StructureEntry * pEntry = m_structures.LookupPtr(data);
    if (!m_reusedStructures.Contains(pEntry->pNode))
        m_reusedStructures.Add(pEntry->pNode);
// Save the info of an RVA into m_rvaInfoVector.
void DataImage::StoreRvaInfo(FieldDesc * pFD,
    RvaInfoStructure rvaInfo;
    _ASSERTE(m_module == pFD->GetModule());
    _ASSERTE(m_module == pFD->GetLoaderModule());
    rvaInfo.align = align;
    m_rvaInfoVector.Append(rvaInfo);
// qsort compare function.
// Primary key: rva (ascending order). Secondary key: size (descending order).
int __cdecl DataImage::rvaInfoVectorEntryCmp(const void* a_, const void* b_)
    LIMITED_METHOD_CONTRACT;
    STATIC_CONTRACT_SO_TOLERANT;
    DataImage::RvaInfoStructure *a = (DataImage::RvaInfoStructure *)a_;
    DataImage::RvaInfoStructure *b = (DataImage::RvaInfoStructure *)b_;
    int rvaComparisonResult = (int)(a->rva - b->rva);
    if (rvaComparisonResult != 0)
        return rvaComparisonResult; // Ascending order on rva
    return (int)(b->size - a->size); // Descending order on size
// Sort the list of RVA statics in ascending order of RVA and save them.
// For RVA structures with the same RVA, we will only store the one with the largest size.
void DataImage::SaveRvaStructure()
    if (m_rvaInfoVector.IsEmpty())
        return; // No RVA statics to save
    // Use qsort to sort the m_rvaInfoVector
    qsort(&m_rvaInfoVector[0],          // start of array
          m_rvaInfoVector.GetCount(),   // array size in elements
          sizeof(RvaInfoStructure),     // element size in bytes
          rvaInfoVectorEntryCmp);       // comparer function
    RvaInfoStructure * previousRvaInfo = NULL;
    for (COUNT_T i = 0; i < m_rvaInfoVector.GetCount(); i++) {
        RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]);
        // Verify that the rvaInfo->rva values are monotonically increasing and that the
        // rvaInfo->size values are monotonically decreasing when the rvas are equal.
        _ASSERTE(previousRvaInfo == NULL ||
                 previousRvaInfo->rva < rvaInfo->rva ||
                 previousRvaInfo->rva == rvaInfo->rva && previousRvaInfo->size >= rvaInfo->size
        if (previousRvaInfo == NULL || previousRvaInfo->rva != rvaInfo->rva) {
            void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL);
            // Note that we force the structures to be laid out in the order we save them
            StoreStructureInOrder(pRVAData, rvaInfo->size,
                                  DataImage::ITEM_RVA_STATICS,
        previousRvaInfo = rvaInfo;
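// Worked example of the ordering and dedup rule above (hypothetical entries):
//
//   input  (rva, size): (0x2010, 8), (0x2000, 4), (0x2000, 16)
//   sorted (rva, size): (0x2000, 16), (0x2000, 4), (0x2010, 8)
//   stored            : (0x2000, 16), (0x2010, 8)
//
// The duplicate at 0x2000 sorts after the larger entry and is skipped by the
// previousRvaInfo->rva != rvaInfo->rva check, so only the largest mapping survives.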
void DataImage::RegisterSurrogate(PVOID ptr, PVOID surrogate)
    STANDARD_VM_CONTRACT;
    m_surrogates.Add(ptr, surrogate);
PVOID DataImage::LookupSurrogate(PVOID ptr)
    STANDARD_VM_CONTRACT;
    const KeyValuePair<PVOID, PVOID> * pEntry = m_surrogates.LookupPtr(ptr);
    return pEntry->Value();
// Please read the comments in corcompile.h for ZapVirtualSectionType before
// putting data items into sections.
FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type)
    LIMITED_METHOD_CONTRACT;
    case NodeTypeForItemKind(DataImage::ITEM_MODULE):
        return CORCOMPILE_SECTION_MODULE;
    // CORCOMPILE_SECTION_WRITE (Hot Writeable)
    // Things only go in here if they are:
    // (a) explicitly identified by profiling data, or
    // (b) not covered by profiling data but known to be frequently written to
    case NodeTypeForItemKind(DataImage::ITEM_FILEREF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_ASSEMREF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_DYNAMIC_STATICS_INFO_TABLE):
    case NodeTypeForItemKind(DataImage::ITEM_DYNAMIC_STATICS_INFO_ENTRY):
    case NodeTypeForItemKind(DataImage::ITEM_CER_RESTORE_FLAGS):
        return CORCOMPILE_SECTION_WRITE;
    // CORCOMPILE_SECTION_WRITEABLE (Cold Writeable)
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_SPECIAL_WRITEABLE):
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_DATA_COLD_WRITEABLE):
    case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY_WRITEABLE):
    case NodeTypeForItemKind(DataImage::ITEM_FROZEN_OBJECTS): // sometimes the objhdr is modified
        return CORCOMPILE_SECTION_WRITEABLE;
    // Other things go in here if they are
    // (a) identified as read by the profiling runs, or
    // (b) not covered by profiling data but identified as typically being read
    case NodeTypeForItemKind(DataImage::ITEM_CER_ROOT_TABLE):
    case NodeTypeForItemKind(DataImage::ITEM_RID_MAP_HOT):
    case NodeTypeForItemKind(DataImage::ITEM_BINDER):
    case NodeTypeForItemKind(DataImage::ITEM_MODULE_SECDESC):
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_HOT):
        return CORCOMPILE_SECTION_HOT;
    case NodeTypeForItemKind(DataImage::ITEM_BINDER_ITEMS): // these are guaranteed to be hot items
        return CORCOMPILE_SECTION_READONLY_SHARED_HOT;
    // SECTION_READONLY_HOT
    case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_HOT): // this is assumed to be hot; it is not written to
    case NodeTypeForItemKind(DataImage::ITEM_MODULE_CCTOR_INFO_HOT):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT):
        return CORCOMPILE_SECTION_READONLY_HOT;
    // SECTION_HOT_WRITEABLE
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_HOT_WRITEABLE):
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_DATA_HOT_WRITEABLE):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_HOT):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_HOT):
        return CORCOMPILE_SECTION_HOT_WRITEABLE;
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_HOT_WRITEABLE):
        return CORCOMPILE_SECTION_METHOD_PRECODE_WRITE;
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_HOT):
        return CORCOMPILE_SECTION_METHOD_PRECODE_HOT;
    // SECTION_RVA_STATICS
    case NodeTypeForItemKind(DataImage::ITEM_RVA_STATICS):
        return CORCOMPILE_SECTION_RVA_STATICS_COLD; // This MUST go in this section
    case NodeTypeForItemKind(DataImage::ITEM_GUID_INFO):
    case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY_LAYOUT):
    case NodeTypeForItemKind(DataImage::ITEM_EECLASS_WARM):
        return CORCOMPILE_SECTION_WARM;
    // SECTION_READONLY_WARM
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE):
    case NodeTypeForItemKind(DataImage::ITEM_INTERFACE_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_DISPATCH_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_GENERICS_STATIC_FIELDDESCS):
    case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_COLD):
    case NodeTypeForItemKind(DataImage::ITEM_MODULE_CCTOR_INFO_COLD):
    case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_NAME):
    case NodeTypeForItemKind(DataImage::ITEM_PROPERTY_NAME_SET):
    case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY_WARM):
        return CORCOMPILE_SECTION_READONLY_WARM;
    case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY):
    case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK):
        return CORCOMPILE_SECTION_READONLY_VCHUNKS_AND_DICTIONARY;
    // SECTION_CLASS_COLD
    case NodeTypeForItemKind(DataImage::ITEM_PARAM_TYPEDESC):
    case NodeTypeForItemKind(DataImage::ITEM_ARRAY_TYPEDESC):
    case NodeTypeForItemKind(DataImage::ITEM_EECLASS):
    case NodeTypeForItemKind(DataImage::ITEM_FIELD_MARSHALERS):
    case NodeTypeForItemKind(DataImage::ITEM_FPTR_TYPEDESC):
#ifdef FEATURE_COMINTEROP
    case NodeTypeForItemKind(DataImage::ITEM_SPARSE_VTABLE_MAP_TABLE):
#endif // FEATURE_COMINTEROP
        return CORCOMPILE_SECTION_CLASS_COLD;
    // SECTION_READONLY_COLD
    case NodeTypeForItemKind(DataImage::ITEM_FIELD_DESC_LIST):
    case NodeTypeForItemKind(DataImage::ITEM_ENUM_VALUES):
    case NodeTypeForItemKind(DataImage::ITEM_ENUM_NAME_POINTERS):
    case NodeTypeForItemKind(DataImage::ITEM_ENUM_NAME):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD):
    case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY):
#ifdef FEATURE_COMINTEROP
    case NodeTypeForItemKind(DataImage::ITEM_SPARSE_VTABLE_MAP_ENTRIES):
#endif // FEATURE_COMINTEROP
    case NodeTypeForItemKind(DataImage::ITEM_CLASS_VARIANCE_INFO):
        return CORCOMPILE_SECTION_READONLY_COLD;
    // SECTION_CROSS_DOMAIN_INFO
    case NodeTypeForItemKind(DataImage::ITEM_CROSS_DOMAIN_INFO):
    case NodeTypeForItemKind(DataImage::ITEM_VTS_INFO):
        return CORCOMPILE_SECTION_CROSS_DOMAIN_INFO;
    // SECTION_METHOD_DESC_COLD
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_COLD):
        return CORCOMPILE_SECTION_METHOD_DESC_COLD;
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_COLD_WRITEABLE):
    case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG):
        return CORCOMPILE_SECTION_METHOD_DESC_COLD_WRITEABLE;
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_COLD):
        return CORCOMPILE_SECTION_METHOD_PRECODE_COLD;
    case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_COLD_WRITEABLE):
        return CORCOMPILE_SECTION_METHOD_PRECODE_COLD_WRITEABLE;
    // SECTION_MODULE_COLD
    case NodeTypeForItemKind(DataImage::ITEM_TYPEDEF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_TYPEREF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_METHODDEF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_FIELDDEF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_MEMBERREF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_GENERICPARAM_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_GENERICTYPEDEF_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_PROPERTYINFO_MAP):
    case NodeTypeForItemKind(DataImage::ITEM_TYVAR_TYPEDESC):
    case NodeTypeForItemKind(DataImage::ITEM_EECLASS_COLD):
    case NodeTypeForItemKind(DataImage::ITEM_CER_METHOD_LIST):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_COLD):
    case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_COLD):
        return CORCOMPILE_SECTION_MODULE_COLD;
    // SECTION_DEBUG_COLD
    case NodeTypeForItemKind(DataImage::ITEM_DEBUG):
    case NodeTypeForItemKind(DataImage::ITEM_INLINING_DATA):
        return CORCOMPILE_SECTION_DEBUG_COLD;
    // SECTION_COMPRESSED_MAPS
    case NodeTypeForItemKind(DataImage::ITEM_COMPRESSED_MAP):
        return CORCOMPILE_SECTION_COMPRESSED_MAPS;
    _ASSERTE(!"Missing mapping between type and section");
    return CORCOMPILE_SECTION_MODULE_COLD;
static int __cdecl LayoutOrderCmp(const void* a_, const void* b_)
    DWORD a = ((DataImage::SavedNodeEntry*)a_)->dwAssociatedOrder;
    DWORD b = ((DataImage::SavedNodeEntry*)b_)->dwAssociatedOrder;
    return (a < b) ? -1 : 0;
void DataImage::PlaceRemainingStructures()
    if (m_pZapImage->HasClassLayoutOrder())
        // The structures are currently in save order; since we are going to change
        // that to class layout order, first place any that require us to maintain save order.
        // Note that this is necessary because qsort is not stable.
        for (COUNT_T iStructure = 0; iStructure < m_structuresInOrder.GetCount(); iStructure++)
            if (m_structuresInOrder[iStructure].dwAssociatedOrder == MAINTAIN_SAVE_ORDER)
                ZapNode * pStructure = m_structuresInOrder[iStructure].pNode;
                if (!pStructure->IsPlaced())
                    ZapVirtualSection * pSection = m_pZapImage->GetSection(GetSectionForNodeType(pStructure->GetType()));
                    pSection->Place(pStructure);
        qsort(&m_structuresInOrder[0], m_structuresInOrder.GetCount(), sizeof(SavedNodeEntry), LayoutOrderCmp);
    // Place the unplaced structures, which may have been re-sorted according to class-layout order
    for (COUNT_T iStructure = 0; iStructure < m_structuresInOrder.GetCount(); iStructure++)
        ZapNode * pStructure = m_structuresInOrder[iStructure].pNode;
        if (!pStructure->IsPlaced())
            ZapVirtualSection * pSection = m_pZapImage->GetSection(GetSectionForNodeType(pStructure->GetType()));
            pSection->Place(pStructure);
int __cdecl DataImage::fixupEntryCmp(const void* a_, const void* b_)
    LIMITED_METHOD_CONTRACT;
    FixupEntry *a = (FixupEntry *)a_;
    FixupEntry *b = (FixupEntry *)b_;
    return (a->m_pLocation->GetRVA() + a->m_offset) - (b->m_pLocation->GetRVA() + b->m_offset);
void DataImage::FixupRVAs()
    STANDARD_VM_CONTRACT;
    // Dev11 bug 181494 instrumentation
    if (m_Fixups.GetCount() != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
    qsort(&m_Fixups[0], m_Fixups.GetCount(), sizeof(FixupEntry), fixupEntryCmp);
    entry.m_pLocation = NULL;
    entry.m_pTargetNode = NULL;
    m_Fixups.Append(entry);
    // Dev11 bug 181494 instrumentation
    if (m_Fixups.GetCount() - 1 != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
void DataImage::SetRVAsForFields(IMetaDataEmit * pEmit)
    for (COUNT_T i = 0; i < m_rvaInfoVector.GetCount(); i++) {
        RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]);
        void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL);
        DWORD dwOffset = GetRVA(pRVAData);
        pEmit->SetRVA(rvaInfo->pFD->GetMemberDef(), dwOffset);
void ZapStoredStructure::Save(ZapWriter * pWriter)
    DataImage * image = ZapImage::GetImage(pWriter)->m_pDataImage;
    DataImage::FixupEntry * pPrevFixupEntry = NULL;
        DataImage::FixupEntry * pFixupEntry = &(image->m_Fixups[image->m_iCurrentFixup]);
        if (pFixupEntry->m_pLocation != this)
            _ASSERTE(pFixupEntry->m_pLocation == NULL ||
                     GetRVA() + GetSize() <= pFixupEntry->m_pLocation->GetRVA());
        PVOID pLocation = (BYTE *)GetData() + pFixupEntry->m_offset;
        if (pPrevFixupEntry == NULL || pPrevFixupEntry->m_offset != pFixupEntry->m_offset)
            SSIZE_T targetOffset = DecodeTargetOffset(pLocation, pFixupEntry->m_type);
            // All pointers in EE data structures should be aligned. This is important to
            // avoid straddling relocations that cause issues with ASLR.
            if (pFixupEntry->m_type == IMAGE_REL_BASED_PTR)
                _ASSERTE(IS_ALIGNED(pWriter->GetCurrentRVA() + pFixupEntry->m_offset, sizeof(TADDR)));
            ZapImage::GetImage(pWriter)->WriteReloc(
                pFixupEntry->m_offset,
                pFixupEntry->m_pTargetNode,
                pFixupEntry->m_type);
            // It's fine to have duplicate fixup entries, but they must target the same data.
            // If this assert fires, Fixup* was called twice on the same field in an NGen'd
            // structure with different targets, which likely indicates the current structure
            // was illegally interned or shared.
            _ASSERTE(pPrevFixupEntry->m_type == pFixupEntry->m_type);
            _ASSERTE(pPrevFixupEntry->m_pTargetNode == pFixupEntry->m_pTargetNode);
        pPrevFixupEntry = pFixupEntry;
        image->m_iCurrentFixup++;
    pWriter->Write(GetData(), m_dwSize);
void DataImage::FixupSectionRange(SIZE_T offset, ZapNode * pNode)
    STANDARD_VM_CONTRACT;
    if (pNode->GetSize() != 0)
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode);
        SIZE_T * pSize = (SIZE_T *)((BYTE *)GetImagePointer(m_module->m_pNGenLayoutInfo) + offset + sizeof(TADDR));
        *pSize = pNode->GetSize();
void DataImage::FixupSectionPtr(SIZE_T offset, ZapNode * pNode)
    if (pNode->GetSize() != 0)
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode);
void DataImage::FixupJumpStubPtr(SIZE_T offset, CorInfoHelpFunc ftnNum)
    ZapNode * pNode = m_pZapImage->GetHelperThunkIfExists(ftnNum);
    FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode);
void DataImage::FixupModuleRVAs()
    STANDARD_VM_CONTRACT;
    FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[0]), m_pZapImage->m_pHotCodeSection);
    FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[1]), m_pZapImage->m_pCodeSection);
    FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[2]), m_pZapImage->m_pColdCodeSection);
    NGenLayoutInfo * pSavedNGenLayoutInfo = (NGenLayoutInfo *)GetImagePointer(m_module->m_pNGenLayoutInfo);
    COUNT_T nHotRuntimeFunctions = m_pZapImage->m_pHotRuntimeFunctionSection->GetNodeCount();
    if (nHotRuntimeFunctions != 0)
        pSavedNGenLayoutInfo->m_nRuntimeFunctions[0] = nHotRuntimeFunctions;
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_UnwindInfoLookupTable[0]), m_pZapImage->m_pHotRuntimeFunctionLookupSection);
        pSavedNGenLayoutInfo->m_UnwindInfoLookupTableEntryCount[0] = m_pZapImage->m_pHotRuntimeFunctionLookupSection->GetSize() / sizeof(DWORD) - 1;
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_MethodDescs[0]), m_pZapImage->m_pHotCodeMethodDescsSection);
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[0]), m_pZapImage->m_pHotRuntimeFunctionSection);
    COUNT_T nRuntimeFunctions = m_pZapImage->m_pRuntimeFunctionSection->GetNodeCount();
    if (nRuntimeFunctions != 0)
        pSavedNGenLayoutInfo->m_nRuntimeFunctions[1] = nRuntimeFunctions;
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_UnwindInfoLookupTable[1]), m_pZapImage->m_pRuntimeFunctionLookupSection);
        pSavedNGenLayoutInfo->m_UnwindInfoLookupTableEntryCount[1] = m_pZapImage->m_pRuntimeFunctionLookupSection->GetSize() / sizeof(DWORD) - 1;
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_MethodDescs[1]), m_pZapImage->m_pCodeMethodDescsSection);
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[1]), m_pZapImage->m_pRuntimeFunctionSection);
    COUNT_T nColdRuntimeFunctions = m_pZapImage->m_pColdRuntimeFunctionSection->GetNodeCount();
    if (nColdRuntimeFunctions != 0)
        pSavedNGenLayoutInfo->m_nRuntimeFunctions[2] = nColdRuntimeFunctions;
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[2]), m_pZapImage->m_pColdRuntimeFunctionSection);
    if (m_pZapImage->m_pColdCodeMapSection->GetNodeCount() != 0)
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_ColdCodeMap), m_pZapImage->m_pColdCodeMapSection);
    FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[0]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_HOT));
    FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[1]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_COLD));
    FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[2]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_WRITE));
    FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[3]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_COLD_WRITEABLE));
    FixupSectionRange(offsetof(NGenLayoutInfo, m_JumpStubs), m_pZapImage->m_pHelperTableSection);
    FixupSectionRange(offsetof(NGenLayoutInfo, m_StubLinkStubs), m_pZapImage->m_pStubsSection);
    FixupSectionRange(offsetof(NGenLayoutInfo, m_VirtualMethodThunks), m_pZapImage->m_pVirtualImportThunkSection);
    FixupSectionRange(offsetof(NGenLayoutInfo, m_ExternalMethodThunks), m_pZapImage->m_pExternalMethodThunkSection);
    if (m_pZapImage->m_pExceptionInfoLookupTable->GetSize() != 0)
        FixupSectionRange(offsetof(NGenLayoutInfo, m_ExceptionInfoLookupTable), m_pZapImage->m_pExceptionInfoLookupTable);
    FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pPrestubJumpStub), CORINFO_HELP_EE_PRESTUB);
#ifdef HAS_FIXUP_PRECODE
    FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pPrecodeFixupJumpStub), CORINFO_HELP_EE_PRECODE_FIXUP);
    FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pVirtualImportFixupJumpStub), CORINFO_HELP_EE_VTABLE_FIXUP);
    FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pExternalMethodFixupJumpStub), CORINFO_HELP_EE_EXTERNAL_FIXUP);
    ZapNode * pFilterPersonalityRoutine = m_pZapImage->GetHelperThunkIfExists(CORINFO_HELP_EE_PERSONALITY_ROUTINE_FILTER_FUNCLET);
    if (pFilterPersonalityRoutine != NULL)
        FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_rvaFilterPersonalityRoutine), pFilterPersonalityRoutine, 0, IMAGE_REL_BASED_ABSOLUTE);
void DataImage::FixupRvaStructure()
    STANDARD_VM_CONTRACT;
    for (COUNT_T i = 0; i < m_rvaInfoVector.GetCount(); i++) {
        RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]);
        void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL);
        DWORD dwOffset = GetRVA(pRVAData);
        FieldDesc * pNewFD = (FieldDesc *)GetImagePointer(rvaInfo->pFD);
        pNewFD->SetOffset(dwOffset);
ZapNode * DataImage::GetCodeAddress(MethodDesc * method)
    ZapMethodHeader * pMethod = m_pZapImage->GetCompiledMethod((CORINFO_METHOD_HANDLE)method);
    return (pMethod != NULL) ? pMethod->GetCode() : NULL;
BOOL DataImage::CanDirectCall(MethodDesc * method, CORINFO_ACCESS_FLAGS accessFlags)
    return m_pZapImage->canIntraModuleDirectCall(NULL, (CORINFO_METHOD_HANDLE)method, NULL, accessFlags);
ZapNode * DataImage::GetFixupList(MethodDesc * method)
    ZapMethodHeader * pMethod = m_pZapImage->GetCompiledMethod((CORINFO_METHOD_HANDLE)method);
    return (pMethod != NULL) ? pMethod->GetFixupList() : NULL;
ZapNode * DataImage::GetHelperThunk(CorInfoHelpFunc ftnNum)
    return m_pZapImage->GetHelperThunk(ftnNum);
ZapNode * DataImage::GetTypeHandleImport(TypeHandle th, PVOID pUniqueId)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetClassHandleImport(CORINFO_CLASS_HANDLE(th.AsPtr()), pUniqueId);
    if (!pImport->IsPlaced())
        m_pZapImage->GetImportTable()->PlaceImport(pImport);
ZapNode * DataImage::GetMethodHandleImport(MethodDesc * pMD)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetMethodHandleImport(CORINFO_METHOD_HANDLE(pMD));
    if (!pImport->IsPlaced())
        m_pZapImage->GetImportTable()->PlaceImport(pImport);
ZapNode * DataImage::GetFieldHandleImport(FieldDesc * pFD)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetFieldHandleImport(CORINFO_FIELD_HANDLE(pFD));
    if (!pImport->IsPlaced())
        m_pZapImage->GetImportTable()->PlaceImport(pImport);
ZapNode * DataImage::GetModuleHandleImport(Module * pModule)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetModuleHandleImport(CORINFO_MODULE_HANDLE(pModule));
    if (!pImport->IsPlaced())
        m_pZapImage->GetImportTable()->PlaceImport(pImport);
DWORD DataImage::GetModuleImportIndex(Module * pModule)
    return m_pZapImage->GetImportTable()->GetIndexOfModule((CORINFO_MODULE_HANDLE)pModule);
ZapNode * DataImage::GetExistingTypeHandleImport(TypeHandle th)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingClassHandleImport(CORINFO_CLASS_HANDLE(th.AsPtr()));
    return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL;
ZapNode * DataImage::GetExistingMethodHandleImport(MethodDesc * pMD)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingMethodHandleImport(CORINFO_METHOD_HANDLE(pMD));
    return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL;
ZapNode * DataImage::GetExistingFieldHandleImport(FieldDesc * pFD)
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingFieldHandleImport(CORINFO_FIELD_HANDLE(pFD));
    return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL;
ZapNode * DataImage::GetVirtualImportThunk(MethodTable * pMT, MethodDesc * pMD, int slotNumber)
    _ASSERTE(pMD == pMT->GetMethodDescForSlot(slotNumber));
    _ASSERTE(!pMD->IsGenericMethodDefinition());
    ZapImport * pImport = m_pZapImage->GetImportTable()->GetVirtualImportThunk(CORINFO_METHOD_HANDLE(pMD), slotNumber);
    if (!pImport->IsPlaced())
        m_pZapImage->GetImportTable()->PlaceVirtualImportThunk(pImport);
ZapNode * DataImage::GetGenericSignature(PVOID signature, BOOL fMethod)
    ZapGenericSignature * pGenericSignature = m_pZapImage->GetImportTable()->GetGenericSignature(signature, fMethod);
    if (!pGenericSignature->IsPlaced())
        m_pZapImage->GetImportTable()->PlaceBlob(pGenericSignature);
    return pGenericSignature;
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
class ZapStubPrecode : public ZapNode
    DataImage::ItemKind m_kind;
    ZapStubPrecode(MethodDesc * pMethod, DataImage::ItemKind kind)
        : m_pMD(pMethod), m_kind(kind)
    virtual DWORD GetSize()
        return sizeof(StubPrecode);
    virtual UINT GetAlignment()
        return PRECODE_ALIGNMENT;
    virtual ZapNodeType GetType()
        return NodeTypeForItemKind(m_kind);
    virtual DWORD ComputeRVA(ZapWriter * pZapWriter, DWORD dwPos)
        dwPos = AlignUp(dwPos, GetAlignment());
        // Alignment for straddlers. Need a cast to help gcc choose between AlignmentTrim(UINT,UINT) and (UINT64,UINT).
        if (AlignmentTrim(static_cast<UINT>(dwPos + offsetof(StubPrecode, m_pMethodDesc)), RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - sizeof(TADDR))
            dwPos += GetAlignment();
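        // Worked example (assuming RELOCATION_PAGE_SIZE == 0x1000 and sizeof(TADDR) == 8): if the
        // m_pMethodDesc slot would land at page offset 0xFFC, then 0xFFC > 0xFF8, so dwPos is bumped
        // by one alignment unit and the 8-byte reloc slot no longer straddles the page boundary.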
    virtual void Save(ZapWriter * pZapWriter)
        ZapImage * pImage = ZapImage::GetImage(pZapWriter);
        StubPrecode precode;
        precode.Init(m_pMD);
        ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
        pImage->WriteReloc(&precode, offsetof(StubPrecode, m_pMethodDesc),
                           pNode, (int)offset, IMAGE_REL_BASED_PTR);
        pImage->WriteReloc(&precode, offsetof(StubPrecode, m_rel32),
                           pImage->GetHelperThunk(CORINFO_HELP_EE_PRESTUB), 0, IMAGE_REL_BASED_REL32);
        pZapWriter->Write(&precode, sizeof(precode));
#ifdef HAS_NDIRECT_IMPORT_PRECODE
class ZapNDirectImportPrecode : public ZapStubPrecode
    ZapNDirectImportPrecode(MethodDesc * pMD, DataImage::ItemKind kind)
        : ZapStubPrecode(pMD, kind)
    virtual void Save(ZapWriter * pZapWriter)
        ZapImage * pImage = ZapImage::GetImage(pZapWriter);
        StubPrecode precode;
        precode.Init(m_pMD);
        ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
        pImage->WriteReloc(&precode, offsetof(StubPrecode, m_pMethodDesc),
                           pNode, (int)offset, IMAGE_REL_BASED_PTR);
        pImage->WriteReloc(&precode, offsetof(StubPrecode, m_rel32),
                           pImage->GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP), 0, IMAGE_REL_BASED_REL32);
        pZapWriter->Write(&precode, sizeof(precode));
#endif // HAS_NDIRECT_IMPORT_PRECODE
#ifdef HAS_REMOTING_PRECODE
class ZapRemotingPrecode : public ZapNode
    DataImage::ItemKind m_kind;
    ZapRemotingPrecode(MethodDesc * pMethod, DataImage::ItemKind kind, BOOL fIsPrebound)
        : m_pMD(pMethod), m_kind(kind), m_fIsPrebound(fIsPrebound)
    virtual DWORD GetSize()
        return sizeof(RemotingPrecode);
    virtual UINT GetAlignment()
        return PRECODE_ALIGNMENT;
    virtual ZapNodeType GetType()
        return NodeTypeForItemKind(m_kind);
    virtual DWORD ComputeRVA(ZapWriter * pZapWriter, DWORD dwPos)
        dwPos = AlignUp(dwPos, GetAlignment());
        // Alignment for straddlers
        if (AlignmentTrim(dwPos + offsetof(RemotingPrecode, m_pMethodDesc), RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - sizeof(TADDR))
            dwPos += GetAlignment();
    virtual void Save(ZapWriter * pZapWriter)
        ZapImage * pImage = ZapImage::GetImage(pZapWriter);
        RemotingPrecode precode;
        precode.Init(m_pMD);
        ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
        pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_pMethodDesc),
                           pNode, offset, IMAGE_REL_BASED_PTR);
        pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_callRel32),
                           pImage->GetHelperThunk(CORINFO_HELP_EE_REMOTING_THUNK), 0, IMAGE_REL_BASED_REL32);
        pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_rel32),
                           pImage->m_pDataImage->GetCodeAddress(m_pMD), 0, IMAGE_REL_BASED_REL32);
        pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_rel32),
                           pImage->GetHelperThunk(CORINFO_HELP_EE_PRESTUB), 0, IMAGE_REL_BASED_REL32);
        pZapWriter->Write(&precode, sizeof(precode));
    BOOL IsPrebound(ZapImage * pImage)
        // This makes sure that when IBC logging is on, the precode goes through the prestub.
        if (GetAppDomain()->ToCompilationDomain()->m_fForceInstrument)
        // Prebind the remoting precode if possible
        return pImage->m_pDataImage->CanDirectCall(m_pMD, CORINFO_ACCESS_THIS);
#endif // HAS_REMOTING_PRECODE
void DataImage::SavePrecode(PVOID ptr, MethodDesc * pMD, PrecodeType t, ItemKind kind, BOOL fIsPrebound)
    ZapNode * pNode = NULL;
        pNode = new (GetHeap()) ZapStubPrecode(pMD, kind);
        GetHelperThunk(CORINFO_HELP_EE_PRESTUB);
#ifdef HAS_NDIRECT_IMPORT_PRECODE
    case PRECODE_NDIRECT_IMPORT:
        pNode = new (GetHeap()) ZapNDirectImportPrecode(pMD, kind);
        GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP);
#endif // HAS_NDIRECT_IMPORT_PRECODE
#ifdef HAS_REMOTING_PRECODE
    case PRECODE_REMOTING:
        pNode = new (GetHeap()) ZapRemotingPrecode(pMD, kind, fIsPrebound);
        GetHelperThunk(CORINFO_HELP_EE_REMOTING_THUNK);
        GetHelperThunk(CORINFO_HELP_EE_PRESTUB);
#endif // HAS_REMOTING_PRECODE
        _ASSERTE(!"Unexpected precode type");
    BindPointer(ptr, pNode, 0);
    AddStructureInOrder(pNode);
#endif // _TARGET_X86_ || _TARGET_AMD64_
void DataImage::FixupModulePointer(Module * pModule, PVOID p, SSIZE_T offset, ZapRelocationType type)
    STANDARD_VM_CONTRACT;
    if (pModule != NULL)
        if (CanEagerBindToModule(pModule) && CanHardBindToZapModule(pModule))
            FixupField(p, offset, pModule, 0, type);
            ZapNode * pImport = GetModuleHandleImport(pModule);
            FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
void DataImage::FixupMethodTablePointer(MethodTable * pMT, PVOID p, SSIZE_T offset, ZapRelocationType type)
    STANDARD_VM_CONTRACT;
        if (CanEagerBindToMethodTable(pMT) && CanHardBindToZapModule(pMT->GetLoaderModule()))
            FixupField(p, offset, pMT, 0, type);
            ZapNode * pImport = GetTypeHandleImport(pMT);
            FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
void DataImage::FixupTypeHandlePointer(TypeHandle th, PVOID p, SSIZE_T offset, ZapRelocationType type)
    STANDARD_VM_CONTRACT;
        if (th.IsTypeDesc())
            if (CanEagerBindToTypeHandle(th) && CanHardBindToZapModule(th.GetLoaderModule()))
                FixupField(p, offset, th.AsTypeDesc(), 2);
                ZapNode * pImport = GetTypeHandleImport(th);
                FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
            MethodTable * pMT = th.AsMethodTable();
            FixupMethodTablePointer(pMT, p, offset, type);
void DataImage::FixupMethodDescPointer(MethodDesc * pMD, PVOID p, SSIZE_T offset, ZapRelocationType type /*=IMAGE_REL_BASED_PTR*/)
    STANDARD_VM_CONTRACT;
        if (CanEagerBindToMethodDesc(pMD) && CanHardBindToZapModule(pMD->GetLoaderModule()))
            FixupField(p, offset, pMD, 0, type);
            ZapNode * pImport = GetMethodHandleImport(pMD);
            FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
void DataImage::FixupFieldDescPointer(FieldDesc * pFD, PVOID p, SSIZE_T offset, ZapRelocationType type /*=IMAGE_REL_BASED_PTR*/)
    STANDARD_VM_CONTRACT;
        if (CanEagerBindToFieldDesc(pFD) && CanHardBindToZapModule(pFD->GetLoaderModule()))
            FixupField(p, offset, pFD, 0, type);
            ZapNode * pImport = GetFieldHandleImport(pFD);
            FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
void DataImage::FixupMethodTablePointer(PVOID p, FixupPointer<PTR_MethodTable> * ppMT)
    FixupMethodTablePointer(ppMT->GetValue(), p, (BYTE *)ppMT - (BYTE *)p, IMAGE_REL_BASED_PTR);
void DataImage::FixupTypeHandlePointer(PVOID p, FixupPointer<TypeHandle> * pth)
    FixupTypeHandlePointer(pth->GetValue(), p, (BYTE *)pth - (BYTE *)p, IMAGE_REL_BASED_PTR);
void DataImage::FixupMethodDescPointer(PVOID p, FixupPointer<PTR_MethodDesc> * ppMD)
    FixupMethodDescPointer(ppMD->GetValue(), p, (BYTE *)ppMD - (BYTE *)p, IMAGE_REL_BASED_PTR);
void DataImage::FixupFieldDescPointer(PVOID p, FixupPointer<PTR_FieldDesc> * ppFD)
    FixupFieldDescPointer(ppFD->GetValue(), p, (BYTE *)ppFD - (BYTE *)p, IMAGE_REL_BASED_PTR);
void DataImage::FixupModulePointer(PVOID p, RelativeFixupPointer<PTR_Module> * ppModule)
    FixupModulePointer(ppModule->GetValueMaybeNull(), p, (BYTE *)ppModule - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
void DataImage::FixupMethodTablePointer(PVOID p, RelativeFixupPointer<PTR_MethodTable> * ppMT)
    FixupMethodTablePointer(ppMT->GetValueMaybeNull(), p, (BYTE *)ppMT - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
void DataImage::FixupTypeHandlePointer(PVOID p, RelativeFixupPointer<TypeHandle> * pth)
    FixupTypeHandlePointer(pth->GetValueMaybeNull(), p, (BYTE *)pth - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
void DataImage::FixupMethodDescPointer(PVOID p, RelativeFixupPointer<PTR_MethodDesc> * ppMD)
    FixupMethodDescPointer(ppMD->GetValueMaybeNull(), p, (BYTE *)ppMD - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
void DataImage::FixupFieldDescPointer(PVOID p, RelativeFixupPointer<PTR_FieldDesc> * ppFD)
    FixupFieldDescPointer(ppFD->GetValueMaybeNull(), p, (BYTE *)ppFD - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
BOOL DataImage::CanHardBindToZapModule(Module *targetModule)
    STANDARD_VM_CONTRACT;
    _ASSERTE(targetModule == m_module || targetModule->HasNativeImage());
    return targetModule == m_module;
BOOL DataImage::CanEagerBindToTypeHandle(TypeHandle th, BOOL fRequirePrerestore, TypeHandleList *pVisited)
    STANDARD_VM_CONTRACT;
    Module * pLoaderModule = th.GetLoaderModule();
    if (th.IsTypeDesc())
        fCanEagerBind = CanEagerBindTo(pLoaderModule, Module::GetPreferredZapModuleForTypeDesc(th.AsTypeDesc()), th.AsTypeDesc());
        fCanEagerBind = CanEagerBindTo(pLoaderModule, Module::GetPreferredZapModuleForMethodTable(th.AsMethodTable()), th.AsMethodTable());
    if (GetModule() != th.GetLoaderModule())
        if (th.IsTypeDesc())
        // As a performance optimization, don't eager bind to arrays. They are currently very
        // expensive to fix up, so we want to do it lazily.
        if (th.AsMethodTable()->IsArray())
        // For correctness in the face of targeted patching, do not eager bind to any instantiation
        // in the target module that might go away.
        if (!th.IsTypicalTypeDefinition() &&
            !Module::IsAlwaysSavedInPreferredZapModule(th.GetInstantiation(),
        // #DoNotEagerBindToTypesThatNeedRestore
        //
        // It is important to avoid eager binding to structures that require restore. The code here stops
        // this from happening for cross-module fixups. For intra-module cases, eager fixups are allowed to
        // (and often do) target types that require restore, even though this is generally prone to all of
        // the same problems described below. Correctness is preserved only because intra-module eager
        // fixups are ignored in Module::RunEagerFixups (so their semantics are very close to normal
        // non-eager fixups).
        //
        // For performance, this is the most costly type of eager fixup (and may require otherwise-unneeded
        // assemblies to be loaded) and has the lowest benefit, since it does not avoid the need for the
        // referencing type to require restore.
        //
        // More importantly, this kind of fixup can compromise correctness by causing type loads to occur
        // during eager fixup resolution. The system is not designed to cope with this, and a variety of
        // subtle failures can occur when it happens. As an example, consider a scenario involving the
        // following assemblies and types:
        //    o A1: softbinds to A2, contains "class A1!Level2 extends A2!Level1"
        //    o A2: hardbinds to A3, contains "class A2!Level1 extends Object", contains methods that use A3!Level3.
        //    o A3: softbinds to A1, contains "class A3!Level3 extends A1!Level2"
        //
        // If eager fixups are allowed to target types that need restore, then it's possible for A2 to end
        // up with an eager fixup targeting A3!Level3, setting up this sequence:
        //    1 Type load starts for A1!Level2.
        //    2 Loading base class A2!Level1 triggers assembly load for A2.
        //    3 Loading A2 involves synchronously resolving its eager fixups, including the fixup to A3!Level3.
        //    4 A3!Level3 needs restore, so type load starts for A3!Level3.
        //    5 Loading A3!Level3 requires loading base class A1!Level2.
        //    6 A1!Level2 is already being loaded on this thread (in #1 above), so the type load fails.
        //    7 Since eager fixup resolution failed, FileLoadException is thrown for A2.
        fRequirePrerestore = TRUE;
    if (fCanEagerBind && fRequirePrerestore)
        fCanEagerBind = !th.ComputeNeedsRestore(this, pVisited);
    return fCanEagerBind;
BOOL DataImage::CanEagerBindToMethodTable(MethodTable *pMT, BOOL fRequirePrerestore, TypeHandleList *pVisited)
    WRAPPER_NO_CONTRACT;
    TypeHandle th = TypeHandle(pMT);
    return DataImage::CanEagerBindToTypeHandle(th, fRequirePrerestore, pVisited);
BOOL DataImage::CanEagerBindToMethodDesc(MethodDesc *pMD, BOOL fRequirePrerestore, TypeHandleList *pVisited)
    STANDARD_VM_CONTRACT;
    BOOL fCanEagerBind = CanEagerBindTo(pMD->GetLoaderModule(), Module::GetPreferredZapModuleForMethodDesc(pMD), pMD);
    // Performance optimization -- see comment in CanEagerBindToTypeHandle
    if (GetModule() != pMD->GetLoaderModule())
        // For correctness in the face of targeted patching, do not eager bind to any instantiation
        // in the target module that might go away.
        if (!pMD->IsTypicalMethodDefinition() &&
            !Module::IsAlwaysSavedInPreferredZapModule(pMD->GetClassInstantiation(),
                                                       pMD->GetMethodInstantiation()))
        fRequirePrerestore = TRUE;
    if (fCanEagerBind && fRequirePrerestore)
        fCanEagerBind = !pMD->ComputeNeedsRestore(this, pVisited);
    return fCanEagerBind;
BOOL DataImage::CanEagerBindToFieldDesc(FieldDesc *pFD, BOOL fRequirePrerestore, TypeHandleList *pVisited)
    STANDARD_VM_CONTRACT;
    if (!CanEagerBindTo(pFD->GetLoaderModule(), Module::GetPreferredZapModuleForFieldDesc(pFD), pFD))
    MethodTable * pMT = pFD->GetApproxEnclosingMethodTable();
    return CanEagerBindToMethodTable(pMT, fRequirePrerestore, pVisited);
BOOL DataImage::CanEagerBindToModule(Module *pModule)
    STANDARD_VM_CONTRACT;
    return GetAppDomain()->ToCompilationDomain()->CanEagerBindToZapFile(pModule);
// "address" is a data structure belonging to pTargetModule.
// This function returns whether the Module currently being ngenned can
// hard-bind to "address".
BOOL DataImage::CanEagerBindTo(Module *pTargetModule, Module *pPreferredZapModule, void *address)
    STANDARD_VM_CONTRACT;
    if (pTargetModule != pPreferredZapModule)
    if (GetModule() == pTargetModule)
    BOOL eagerBindToZap = GetAppDomain()->ToCompilationDomain()->CanEagerBindToZapFile(pTargetModule);
    BOOL isPersisted = pTargetModule->IsPersistedObject(address);
    return eagerBindToZap && isPersisted;
BOOL DataImage::CanPrerestoreEagerBindToTypeHandle(TypeHandle th, TypeHandleList *pVisited)
    WRAPPER_NO_CONTRACT;
    return CanEagerBindToTypeHandle(th, TRUE, pVisited);
BOOL DataImage::CanPrerestoreEagerBindToMethodTable(MethodTable *pMT, TypeHandleList *pVisited)
    WRAPPER_NO_CONTRACT;
    return CanEagerBindToMethodTable(pMT, TRUE, pVisited);
BOOL DataImage::CanPrerestoreEagerBindToMethodDesc(MethodDesc *pMD, TypeHandleList *pVisited)
    WRAPPER_NO_CONTRACT;
    return CanEagerBindToMethodDesc(pMD, TRUE, pVisited);
void DataImage::HardBindTypeHandlePointer(PVOID p, SSIZE_T offset)
    PRECONDITION(CanEagerBindToTypeHandle(*(TypeHandle UNALIGNED*)((BYTE *)p + offset)));
    TypeHandle thCopy = *(TypeHandle UNALIGNED*)((BYTE *)p + offset);
    if (!thCopy.IsNull())
        if (thCopy.IsTypeDesc())
            FixupField(p, offset, thCopy.AsTypeDesc(), 2);
            FixupField(p, offset, thCopy.AsMethodTable());
// This is an obsolete in-place fixup that we should get rid of. For now, it is used for:
// - FnPtrTypeDescs. These should not be stored in NGen images at all.
// - Stubs-as-IL signatures. These should use tokens when stored in an NGen image.
void DataImage::FixupTypeHandlePointerInPlace(PVOID p, SSIZE_T offset, BOOL fForceFixup /*=FALSE*/)
    STANDARD_VM_CONTRACT;
    TypeHandle thCopy = *(TypeHandle UNALIGNED*)((BYTE *)p + offset);
    if (!thCopy.IsNull())
            CanEagerBindToTypeHandle(thCopy) &&
            CanHardBindToZapModule(thCopy.GetLoaderModule()))
            HardBindTypeHandlePointer(p, offset);
            ZapImport * pImport = m_pZapImage->GetImportTable()->GetClassHandleImport((CORINFO_CLASS_HANDLE)thCopy.AsPtr());
            ZapNode * pBlob = m_pZapImage->GetImportTable()->PlaceImportBlob(pImport);
            FixupFieldToNode(p, offset, pBlob, 0, IMAGE_REL_BASED_ABSOLUTE_TAGGED);
void DataImage::BeginRegion(CorInfoRegionKind regionKind)
    STANDARD_VM_CONTRACT;
    m_pZapImage->BeginRegion(regionKind);
void DataImage::EndRegion(CorInfoRegionKind regionKind)
    STANDARD_VM_CONTRACT;
    m_pZapImage->EndRegion(regionKind);
void DataImage::ReportInlining(CORINFO_METHOD_HANDLE inliner, CORINFO_METHOD_HANDLE inlinee)
    STANDARD_VM_CONTRACT;
    _ASSERTE(m_inlineTrackingMap);
    m_inlineTrackingMap->AddInlining(GetMethod(inliner), GetMethod(inlinee));
InlineTrackingMap * DataImage::GetInlineTrackingMap()
    LIMITED_METHOD_DAC_CONTRACT;
    return m_inlineTrackingMap;
//
// Compressed LookupMap Support
//
// See the large comment near the top of ceeload.h for a much more detailed discussion of this.
//
// Basically we support a specialized node, ZapCompressedLookupMap, which knows how to compress the array of
// intra-module pointers present in certain types of LookupMap.
//
// A simple class to write a sequential series of variable-sized bit-fields into a pre-allocated buffer. I
// was going to use the version defined by GcInfoEncoder (the reader side in ceeload.cpp uses GcInfoDecoder's
// BitStreamReader) but unfortunately the code is not currently factored to make this easy, and the resources
// were not available to perform a non-trivial refactoring of the code. In any event the writer is fairly
// trivial and doesn't represent a huge duplication of effort.
// The class requires that the input buffer is DWORD-aligned and sized (it uses a DWORD cache and always
// writes data to the buffer in DWORD-sized chunks).
class BitStreamWriter
    // Initialize a writer and point it at the start of a pre-allocated buffer (large enough to accommodate
    // all future writes). The buffer must be DWORD-aligned (we use this for some performance optimization).
    BitStreamWriter(DWORD *pStart)
        LIMITED_METHOD_CONTRACT;
        // Buffer must be DWORD-aligned.
        _ASSERTE(((TADDR)pStart & 0x3) == 0);
        m_pNext = pStart;    // Point at the start of the buffer
        m_dwCurrent = 0;     // We don't have any cached data waiting to write
        m_cCurrentBits = 0;  // Ditto
        m_cBitsWritten = 0;  // We haven't written any bits
    // Write the low-order cBits of dwData to the stream.
    void Write(DWORD dwData, DWORD cBits)
        LIMITED_METHOD_CONTRACT;
        // We can only write between 1 and 32 bits of data at a time.
        _ASSERTE(cBits > 0 && cBits <= kBitsPerDWORD);
        // Check that none of the unused high-order bits of dwData have stale data in them (we can use this to
        // optimize paths below). Use two conditions here because << of 32 bits or more (on x86) doesn't
        // do what you might expect (the RHS is modulo 32, so "<< 32" is a no-op rather than zeroing the
        // result).
        _ASSERTE((cBits == kBitsPerDWORD) || ((dwData & ((1U << cBits) - 1)) == dwData));
1871 // Record the input bits as written (we can't fail and we have multiple exit paths below so it's
1872 // convenient to update our counter here).
1873 m_cBitsWritten += cBits;
1875 // We cache up to a DWORD of data to be written to the stream and only write back to the buffer when
1876 // we have a full DWORD. Calculate how many bits of the input we're going to write first (either the
1877 // rest of the input or the remaining bits of space in the current DWORD cache, whichever is smaller).
1878 DWORD cInitialBits = min(cBits, kBitsPerDWORD - m_cCurrentBits);
1879 if (cInitialBits == kBitsPerDWORD)
1881 // Deal with this special case (we're writing all the input, an entire DWORD all at once) since it
1882 // ensures that none of the << operations below have to deal with a LHS that == 32 (see the <<
1883 // comment in one of the asserts above for why this matters).
1885 // Because of the calculations above we should only come here if our DWORD cache was empty and the
1886 // caller is trying to write a full DWORD (which simplifies many things).
1887 _ASSERTE(m_dwCurrent == 0 && m_cCurrentBits == 0 && cBits == kBitsPerDWORD);
1889 *m_pNext++ = dwData; // Write a full DWORD directly from the input
1891 // That's it, there's no more data to write and the only state update to the write was advancing
1892 // the buffer pointer (cache DWORD is already in the correct state, see asserts above).
1896 // Calculate a mask of the low-order bits we're going to extract from the input data.
1897 DWORD dwInitialMask = (1U << cInitialBits) - 1;
1899 // OR those bits into the cache (properly shifted to fit above the data already there).
1900 m_dwCurrent |= (dwData & dwInitialMask) << m_cCurrentBits;
1902 // Update the cache bit counter for the new data.
1903 m_cCurrentBits += cInitialBits;
1904 if (m_cCurrentBits == kBitsPerDWORD)
1906 // The cache filled up. Write the DWORD to the buffer and reset the cache state to empty.
1907 *m_pNext++ = m_dwCurrent;
1912 // If the bits we just inserted comprised all the input bits we're done.
1913 if (cInitialBits == cBits)
1916 // There's more data to write. But we can only get here if we just flushed the cache. So there is a
1917 // whole DWORD free in the cache and we're guaranteed to have less than a DWORD of data left to write.
1918 // As a result we can simply populate the low-order bits of the cache with our remaining data (simply
1919 // shift down by the number of bits we've already written) and we're done.
1920 _ASSERTE(m_dwCurrent == 0 && m_cCurrentBits == 0);
1921 m_dwCurrent = dwData >>= cInitialBits;
1922 m_cCurrentBits = cBits - cInitialBits;
    // Because we cache a DWORD of data before writing it, it's possible that there are still unwritten bits
    // left in the cache once you've finished writing data. Call this operation after all Writes() are
    // completed to flush any such data to memory. It's not legal to call Write() again after a Flush().
    void Flush()
    {
        LIMITED_METHOD_CONTRACT;

        // Nothing to do if the cache is empty.
        if (m_cCurrentBits == 0)
            return;

        // Write what we have to memory (unused high-order bits will be zero).
        *m_pNext = m_dwCurrent;

        // Catch any attempt to make a further Write() call.
        m_pNext = NULL;
    }

    // Get the count of bits written so far (logically, this number does not take caching into account).
    DWORD GetBitsWritten()
    {
        LIMITED_METHOD_CONTRACT;

        return m_cBitsWritten;
    }

private:
    enum { kBitsPerDWORD = sizeof(DWORD) * 8 };

    DWORD  *m_pNext;         // Pointer to the next DWORD that will be written in the buffer
    DWORD   m_dwCurrent;     // We cache up to a DWORD of data before writing it to the buffer
    DWORD   m_cCurrentBits;  // Count of valid (low-order) bits in the cache above
    DWORD   m_cBitsWritten;  // Count of bits given to Write() (ignores caching)
};

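// A minimal usage sketch of BitStreamWriter (illustrative only; buffer size and values are made up):
//
//   DWORD rgBuffer[2] = { 0, 0 };
//   BitStreamWriter sWriter(rgBuffer);
//   sWriter.Write(0x3, 2);                      // Cache now holds binary 11 (2 valid bits)
//   sWriter.Write(0x1, 3);                      // Cache now holds binary 00111 (5 valid bits)
//   sWriter.Flush();                            // rgBuffer[0] == 0x7; no further Write() calls allowed
//   _ASSERTE(sWriter.GetBitsWritten() == 5);
//
// Note that earlier writes occupy the low-order bits of each output DWORD.
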
// A specialized node used to write the compressed portions of a LookupMap to an ngen image. This is
// (optionally) allocated by a call to DataImage::StoreCompressedLayoutMap from LookupMapBase::Save() and
// handles allocation and initialization of the compressed table and an index used to navigate the table
// efficiently. The allocation of the map itself and any hot item list is still handled externally but this
// node will perform any fixups in the base map required to refer to the new compressed data.
//
// Since the compression algorithm used depends on the precise values of the RVAs referenced by the LookupMap
// the compression doesn't happen until ComputeRVA is called (don't call GetSize() until after ComputeRVA()
// returns). Additionally we must ensure that this node's ComputeRVA() is not called until after that of
// every node whose RVAs it depends on. Currently this is ensured by placing this node near the end of the
// .text section (after pointers to any read-only data structures referenced by LookupMaps and after the
// .data section containing writeable structures).
class ZapCompressedLookupMap : public ZapNode
{
    DataImage      *m_pImage;                                       // Back pointer to the allocating DataImage
    LookupMapBase  *m_pMap;                                         // Back pointer to the LookupMap we're compressing
    BYTE           *m_pTable;                                       // ComputeRVA allocates a compressed table here
    BYTE           *m_pIndex;                                       // ComputeRVA allocates a table index here
    DWORD           m_cbTable;                                      // Size (in bytes) of the table above (after ComputeRVA)
    DWORD           m_cbIndex;                                      // Size (in bytes) of the index above (after ComputeRVA)
    DWORD           m_cBitsPerIndexEntry;                           // Number of bits in each index entry
    DWORD           m_rgHistogram[kBitsPerRVA];                     // Table of frequencies of different delta lengths
    BYTE            m_rgEncodingLengths[kLookupMapLengthEntries];   // Table of different bit lengths value deltas can take
    BYTE            m_eKind;                                        // Item kind (DataImage::ITEM_COMPRESSED_MAP currently)

public:
    ZapCompressedLookupMap(DataImage *pImage, LookupMapBase *pMap, BYTE eKind)
        : m_pImage(pImage), m_pMap(pMap), m_eKind(eKind)
    {
        LIMITED_METHOD_CONTRACT;
    }

    DataImage::ItemKind GetKind()
    {
        LIMITED_METHOD_CONTRACT;

        return (DataImage::ItemKind)m_eKind;
    }

    virtual DWORD GetSize()
    {
        LIMITED_METHOD_CONTRACT;

        if (!ShouldCompressedMapBeSaved())
            return 0;

        // This isn't legal until ComputeRVA() is called. Check this by seeing if the compressed version of
        // the table is allocated yet.
        _ASSERTE(m_pTable != NULL);
        return m_cbIndex + m_cbTable;
    }

    virtual UINT GetAlignment()
    {
        LIMITED_METHOD_CONTRACT;

        if (!ShouldCompressedMapBeSaved())
            return 1;

        // The table and index have no pointers but do require DWORD alignment.
        return sizeof(DWORD);
    }

    virtual ZapNodeType GetType()
    {
        STANDARD_VM_CONTRACT;

        return NodeTypeForItemKind(m_eKind);
    }

    virtual DWORD ComputeRVA(ZapWriter *pZapWriter, DWORD dwPos)
    {
        STANDARD_VM_CONTRACT;

        if (ShouldCompressedMapBeSaved())
        {
            // This is the earliest opportunity at which all data is available in order to compress the table. In
            // particular all values in the table (currently MethodTable* or MethodDesc*) point to structures
            // which have been assigned final RVAs in the image. We can thus compute a compressed table value that
            // relies on the relationship between these RVAs.

            // Phase 1: Look through all the entries in the table. Look at the deltas between RVAs for adjacent
            // items and build a histogram of how many entries require a specific number of bits to encode their
            // delta (using a scheme where we discard non-significant low and high-order zero bits). This call
            // will initialize m_rgHistogram so that entry 0 contains the number of entries that require 1 bit to
            // encode their delta, entry 1 the count of those that require 2 bits etc. up to the last entry (how
            // many entries require the full 32 bits). Note that even on 64-bit platforms we only currently
            // support 32-bit RVAs.
            DWORD cRids = AnalyzeTable();

            // Phase 2: Given the histogram above, calculate the set of delta lengths for the encoding table
            // (m_rgEncodingLengths) that will result in optimal table size. We have a fixed-size table of
            // encoding lengths so we don't have to embed a large fixed-size length field for every compressed
            // entry, but we can still cope with the relatively rare but ever-present worst case entries which
            // require many bits to encode their delta.
            OptimizeEncodingLengths();

            // Phase 3: We now have enough data to allocate the final data structures (the compressed table itself
            // and an index that bookmarks every kLookupMapIndexStride'th entry). Both structures must start
            // DWORD-aligned and have a DWORD-aligned size (requirements of BitStreamWriter).

            // PredictCompressedSize() returns its result in bits so we must convert (rounding up) to bytes before
            // DWORD aligning.
            m_cbTable = AlignUp((PredictCompressedSize(m_rgEncodingLengths) + 7) / 8, sizeof(DWORD));

            // Each index entry contains a bit offset into the compressed stream (so we must size for the worst
            // case of an offset at the end of the stream) plus an RVA.
            m_cBitsPerIndexEntry = BitsRequired(m_cbTable * 8) + kBitsPerRVA;
            _ASSERTE(m_cBitsPerIndexEntry > 0);

            // Our first index entry is for entry 0 (rather than entry kLookupMapIndexStride) so we must be
            // sure to round up the number of index entries we need in order to cover the table.
            DWORD cIndexEntries = (cRids + (kLookupMapIndexStride - 1)) / kLookupMapIndexStride;

            // Since we calculate the index size in bits we need to round up to bytes before DWORD aligning.
            m_cbIndex = AlignUp(((m_cBitsPerIndexEntry * cIndexEntries) + 7) / 8, sizeof(DWORD));

            // Allocate both table and index from a single chunk of memory.
            BYTE *pMemory = new BYTE[m_cbIndex + m_cbTable];
            m_pTable = pMemory;
            m_pIndex = pMemory + m_cbTable;

            // Phase 4: We've now calculated all the input data we need and allocated memory for the output so we
            // can go ahead and fill in the compressed table and index.
            InitializeTableAndIndex();

            // Phase 5: Go back and update the saved version of the LookupMap (redirect the table pointer to the
            // compressed table and fill in the other fields which aren't valid until the table is compressed).
            LookupMapBase *pSaveMap = (LookupMapBase*)m_pImage->GetImagePointer(m_pMap);
            pSaveMap->pTable = (TADDR*)m_pTable;
            pSaveMap->pIndex = m_pIndex;
            pSaveMap->cIndexEntryBits = m_cBitsPerIndexEntry;
            pSaveMap->cbTable = m_cbTable;
            pSaveMap->cbIndex = m_cbIndex;
            memcpy(pSaveMap->rgEncodingLengths, m_rgEncodingLengths, sizeof(m_rgEncodingLengths));

            // Schedule fixups for the map pointers to the compressed table and index.
            m_pImage->FixupFieldToNode(m_pMap, offsetof(LookupMapBase, pTable), this, 0);
            m_pImage->FixupFieldToNode(m_pMap, offsetof(LookupMapBase, pIndex), this, m_cbTable);
        }

        // We're done with generating the compressed table. Now we need to do the work ComputeRVA() is meant
        // to do:
        dwPos = AlignUp(dwPos, GetAlignment()); // Satisfy our alignment requirements
        SetRVA(dwPos);                          // Set the RVA of the node (both table and index)
        dwPos += GetSize();                     // Advance the RVA past our node

        return dwPos;
    }

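    // A worked example of the Phase 3 sizing arithmetic above (all numbers are made up for illustration):
    // if PredictCompressedSize() returns 1000 bits then m_cbTable = AlignUp((1000 + 7) / 8, sizeof(DWORD))
    // = AlignUp(125, 4) = 128 bytes. The worst-case bit offset into that table is 128 * 8 = 1024, so each
    // index entry needs BitsRequired(1024) = 11 bits of offset on top of its kBitsPerRVA bits of RVA.
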
    virtual void Save(ZapWriter *pZapWriter)
    {
        STANDARD_VM_CONTRACT;

        if (!ShouldCompressedMapBeSaved())
            return;

        // Save both the table and index.
        pZapWriter->Write(m_pTable, m_cbTable);
        pZapWriter->Write(m_pIndex, m_cbIndex);
    }

private:

    // It's possible that our node has been created and only later the decision is made to store the full
    // uncompressed table. In this case, we want to early out of our work and make saving our node a no-op.
    BOOL ShouldCompressedMapBeSaved()
    {
        LIMITED_METHOD_CONTRACT;

        // To identify whether compression is desired, use the flag from LookupMapBase::Save.
        return (m_pMap->cIndexEntryBits > 0);
    }

    // Phase 1: Look through all the entries in the table. Look at the deltas between RVAs for adjacent items
    // and build a histogram of how many entries require a specific number of bits to encode their delta
    // (using a scheme where we discard non-significant low and high-order zero bits). This call will
    // initialize m_rgHistogram so that entry 0 contains the number of entries that require 1 bit to encode
    // their delta, entry 1 the count of those that require 2 bits etc. up to the last entry (how many
    // entries require the full 32 bits). Note that even on 64-bit platforms we only currently support
    // 32-bit RVAs.
    DWORD AnalyzeTable()
    {
        STANDARD_VM_CONTRACT;

        LookupMapBase *pMap = m_pMap;
        DWORD dwLastValue = 0;
        DWORD cRids = 0;

        // Initialize the histogram to all zeroes.
        memset(m_rgHistogram, 0, sizeof(m_rgHistogram));

        // Walk each node in the map.
        while (pMap)
        {
            // Walk each entry in this node.
            for (DWORD i = 0; i < pMap->dwCount; i++)
            {
                DWORD dwCurrentValue = ComputeElementRVA(pMap, i);

                // Calculate the delta from the last entry. We split the delta into two components: a bool
                // indicating whether the RVA was higher or lower and an absolute (non-negative) size. Sort of
                // like a ones-complement signed number.
                bool fIncreasingDelta = dwCurrentValue > dwLastValue;
                DWORD dwDelta = fIncreasingDelta ? (dwCurrentValue - dwLastValue) : (dwLastValue - dwCurrentValue);

                // Determine the minimum number of bits required to represent the delta (by stripping
                // non-significant leading zeros) and update the count in the histogram of the number of
                // deltas that required this many bits. We never encode anything with zero bits (only the
                // value zero would be eligible and it's not a common value) so the first histogram entry
                // records the number of deltas encodable with one bit and so on.
                m_rgHistogram[BitsRequired(dwDelta) - 1]++;

                dwLastValue = dwCurrentValue;
                cRids++;
            }

            pMap = pMap->pNext;
        }

        return cRids;
    }

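    // A worked example of the analysis above (the RVAs are made up): for successive element RVAs 0x100,
    // 0x180 and 0x140 the deltas are +0x80 and -0x40. BitsRequired(0x80) == 8 and BitsRequired(0x40) == 7,
    // so m_rgHistogram[7] and m_rgHistogram[6] are each incremented once.
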
    // Phase 2: Given the histogram above, calculate the set of delta lengths for the encoding table
    // (m_rgEncodingLengths) that will result in optimal table size. We have a fixed-size table of encoding
    // lengths so we don't have to embed a large fixed-size length field for every compressed entry, but we
    // can still cope with the relatively rare but ever-present worst case entries which require many bits to
    // encode their delta.
    void OptimizeEncodingLengths()
    {
        STANDARD_VM_CONTRACT;

        // Find the longest delta (search from the large end of the histogram down for the first non-zero
        // entry).
        BYTE bMaxBits = 0;
#pragma warning(suppress:6293) // Prefast doesn't understand the unsigned modulo-256 (BYTE) arithmetic below.
        for (BYTE i = kBitsPerRVA - 1; i < 0xff; i--)
            if (m_rgHistogram[i] > 0)
            {
                bMaxBits = i + 1;   // +1 because we never encode anything with zero bits.
                break;
            }
        _ASSERTE(bMaxBits >= 1);

        // Now find the smallest delta in a similar fashion.
        BYTE bMinBits = bMaxBits;
        for (BYTE i = 0; i < kBitsPerRVA; i++)
            if (m_rgHistogram[i] > 0)
            {
                bMinBits = i + 1;   // +1 because we never encode anything with zero bits.
                break;
            }
        _ASSERTE(bMinBits <= bMaxBits);

        // The encoding lengths table is a sorted list of bit field lengths we can use to encode any
        // entry-to-entry delta in the compressed table. We go through a table so we can use a small number of
        // bits in the compressed stream (the table index) to express a very flexible range of deltas. The one
        // entry we know in advance is the largest (the last). That's because we know we have to be able to
        // encode the largest delta we found in the table or else we couldn't be functionally correct.
        m_rgEncodingLengths[kLookupMapLengthEntries - 1] = bMaxBits;

        // Now find optimal values for the other entries one by one. It doesn't really matter which order we
        // do them in. For each entry we'll loop through all the possible encoding lengths, bMinBits <=
        // length < bMaxBits, setting all the uninitialized entries to the candidate value and calculating
        // the resulting compressed size of the table. We don't enforce that the candidate sizes get smaller
        // for each entry, so if the best use of an extra table entry is to add a larger length rather than a
        // smaller one then we'll take that. The downside is that we have to sort the table before
        // calculating the table size (the sizing algorithm is only fast for a sorted table). Luckily our
        // table is very small (currently 4 entries) and we don't have to sort one of the entries (the last
        // is always largest) so this isn't such a huge deal.
        for (DWORD i = 0; i < kLookupMapLengthEntries - 1; i++)
        {
            DWORD dwBestSize = 0xffffffff;  // Best overall table size so far
            BYTE bBestLength = bMaxBits;    // The candidate value that led to the above

            // Iterate over all the values that could generate a good result (no point trying values smaller
            // than the smallest delta we have or as large as the maximum table entry we've already fixed).
            for (BYTE j = bMinBits; j < bMaxBits; j++)
            {
                // Build a temporary (unsorted) encoding table.
                BYTE rgTempBuckets[kLookupMapLengthEntries];

                // Entries before the current one are set to the values we've already determined in previous
                // iterations.
                for (DWORD k = 0; k < i; k++)
                    rgTempBuckets[k] = m_rgEncodingLengths[k];

                // The current entry and the remaining uninitialized entries are all set to the current
                // candidate value (this is logically the equivalent of removing the non-current uninitialized
                // entries from the table altogether).
                for (DWORD k = i; k < kLookupMapLengthEntries - 1; k++)
                    rgTempBuckets[k] = j;

                // The last entry is always the maximum bit length.
                rgTempBuckets[kLookupMapLengthEntries - 1] = bMaxBits;

                // Sort the temporary table so that the call to PredictCompressedSize() below behaves
                // correctly (and fast).
                SortLengthBuckets(rgTempBuckets);

                // See what size of table this would generate.
                DWORD dwTestSize = PredictCompressedSize(rgTempBuckets);
                if (dwTestSize < dwBestSize)
                {
                    // The result is better than our current best, remember it.
                    dwBestSize = dwTestSize;
                    bBestLength = j;
                }
            }

            // Set the current entry to the best length we found.
            m_rgEncodingLengths[i] = bBestLength;
        }

        // We've picked optimal values for all entries, but the result is unsorted. Fix that now.
        SortLengthBuckets(m_rgEncodingLengths);
    }

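    // For instance (the numbers are made up for illustration), with kLookupMapLengthEntries == 4 and a
    // histogram where most deltas fit in 8 bits but a few need 26, the search above might settle on an
    // encoding table such as { 6, 8, 12, 26 }: common deltas then pay only a short table index plus 6 or 8
    // bits of magnitude, while the rare worst-case deltas remain representable via the final entry.
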
    // Phase 4: We've now calculated all the input data we need and allocated memory for the output so we can
    // go ahead and fill in the compressed table and index.
    void InitializeTableAndIndex()
    {
        STANDARD_VM_CONTRACT;

        // Initialize bit stream writers to the start of the compressed table and index.
        BitStreamWriter sTableStream((DWORD*)m_pTable);
        BitStreamWriter sIndexStream((DWORD*)m_pIndex);

        DWORD dwRid = 0;
        DWORD dwLastValue = 0;
        LookupMapBase *pMap = m_pMap;

        // Walk each node in the map.
        while (pMap)
        {
            // Walk each entry in this node.
            for (DWORD i = 0; i < pMap->dwCount; i++)
            {
                DWORD dwCurrentValue = ComputeElementRVA(pMap, i);

                // Calculate the delta from the last entry. We split the delta into two components: a bool
                // indicating whether the RVA was higher or lower and an absolute (non-negative) size. Sort of
                // like a ones-complement signed number.
                bool fIncreasingDelta = dwCurrentValue > dwLastValue;
                DWORD dwDelta = fIncreasingDelta ? (dwCurrentValue - dwLastValue) : (dwLastValue - dwCurrentValue);

                // As a trade-off we can't store deltas with their most efficient length (because just
                // encoding the length can dominate the space requirement when we have to cope with worst-case
                // deltas). Instead we encode a relatively short index into the table of encoding lengths we
                // calculated back in phase 2. So some deltas will encode in more bits than necessary but
                // overall we'll win due to lowered prefix bit requirements.
                // Look through all the table entries and choose the first that's large enough to accommodate
                // our delta.
                DWORD dwDeltaBitLength = BitsRequired(dwDelta);
                DWORD j;
                for (j = 0; j < kLookupMapLengthEntries; j++)
                {
                    if (m_rgEncodingLengths[j] >= dwDeltaBitLength)
                    {
                        dwDeltaBitLength = m_rgEncodingLengths[j];
                        break;
                    }
                }
                _ASSERTE(j < kLookupMapLengthEntries);

                // Write the entry into the compressed table.
                sTableStream.Write(j, kLookupMapLengthBits);        // The index for the delta length
                sTableStream.Write(fIncreasingDelta ? 1 : 0, 1);    // The +/- delta indicator
                sTableStream.Write(dwDelta, dwDeltaBitLength);      // The delta itself

                // Is this entry one that requires a corresponding index entry?
                if ((dwRid % kLookupMapIndexStride) == 0)
                {
                    // Write an index entry:
                    //  * The current (map-relative) RVA.
                    //  * The position in the table bit stream of the next entry.
                    sIndexStream.Write(dwCurrentValue, kBitsPerRVA);
                    sIndexStream.Write(sTableStream.GetBitsWritten(), m_cBitsPerIndexEntry - kBitsPerRVA);
                }

                dwRid++;
                dwLastValue = dwCurrentValue;
            }

            pMap = pMap->pNext;
        }

        // Flush any remaining bits in the caches of the table and index stream writers.
        sTableStream.Flush();
        sIndexStream.Flush();

        // Make sure what we wrote fits in what we allocated.
        _ASSERTE((sTableStream.GetBitsWritten() / 8) <= m_cbTable);
        _ASSERTE((sIndexStream.GetBitsWritten() / 8) <= m_cbIndex);

        // Also check that we didn't have more than 31 bits of excess space allocated either (we should have
        // allocated DWORD-aligned lengths).
        _ASSERTE(((m_cbTable * 8) - sTableStream.GetBitsWritten()) < 32);
        _ASSERTE(((m_cbIndex * 8) - sIndexStream.GetBitsWritten()) < 32);
    }

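    // Layout sketch of one compressed table entry as written above (for illustration; the widths shown
    // assume a made-up encoding table of { 6, 8, 12, 26 }):
    //
    //   <-- kLookupMapLengthBits --><-- 1 bit --><-- m_rgEncodingLengths[j] bits -->
    //       length-table index          sign             delta magnitude
    //
    // e.g. a delta of +0x21 needs BitsRequired(0x21) == 6 bits, so it selects index 0 (the first length
    // >= 6), writes a sign bit of 1 and then the 6 magnitude bits of 0x21.
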
    // Determine the final, map-relative RVA of the element at a specified index.
    DWORD ComputeElementRVA(LookupMapBase *pMap, DWORD index)
    {
        STANDARD_VM_CONTRACT;

        // We base our RVAs on the RVA of the map (rather than the module). This is purely because individual
        // maps don't store back pointers to their owning module so it's easier to recover pointer values at
        // runtime using the map address instead.
        DWORD rvaBase = m_pImage->GetRVA(m_pMap);

        // Retrieve the pointer value in the specified entry. This is tricky since the pointer is
        // encoded as a RelativePointer.
        DWORD dwFinalRVA;
        TADDR entry = RelativePointer<TADDR>::GetValueMaybeNullAtPtr((TADDR)&pMap->pTable[index]);
        if (entry == 0)
        {
            // The pointer was null. We encode this as a zero RVA (an RVA pointing to the map itself,
            // which should never happen otherwise).
            dwFinalRVA = 0;
        }
        else
        {
            // Non-null pointer, go get the RVA it's been mapped to. Transform this RVA into our
            // special map-relative variant by subtracting the map base.
            //
            // Some of the pointer alignment bits may have been used as flags; preserve them.
            DWORD flags = entry & ((1 << kFlagBits) - 1);
            entry -= flags;

            // We only support compressing maps of pointers to saved objects (e.g. no indirected FixupPointers)
            // so there is guaranteed to be a valid RVA at this point. If this does not hold, GetRVA will assert.
            DWORD rvaEntry = m_pImage->GetRVA((void*)entry);

            dwFinalRVA = rvaEntry - rvaBase + flags;
        }

        return dwFinalRVA;
    }

    // Determine the number of bits required to represent the significant portion of a value (i.e. the value
    // without any leading 0s). Always return 1 as a minimum (we do not encode 0 in 0 bits).
    DWORD BitsRequired(DWORD dwValue)
    {
        LIMITED_METHOD_CONTRACT;

#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && defined(_MSC_VER)

        // Since this operation could impact the performance of ngen (we call this a *lot*) we'll try and
        // optimize this where we can. x86 and amd64 actually have instructions to find the least and most
        // significant bits in a DWORD and MSVC exposes this as a builtin.
        DWORD dwHighBit;
        if (_BitScanReverse(&dwHighBit, dwValue))
            return dwHighBit + 1;
        else
            return 1;

#else // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER

        // Otherwise we'll calculate this the slow way. Pick off the 32-bit case first to avoid the
        // usual << problem (x << 32 == x, not 0).
        if (dwValue > 0x7fffffff)
            return 32;

        DWORD cBits = 1;
        while (dwValue > ((1U << cBits) - 1))
            cBits++;

        return cBits;

#endif // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER
    }

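    // Example values (both implementations above agree): BitsRequired(0) == 1, BitsRequired(1) == 1,
    // BitsRequired(5) == 3 (binary 101) and BitsRequired(0x80000000) == 32.
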
    // Sort the given input array (of kLookupMapLengthEntries entries, where the last entry is already sorted)
    // from lowest to highest value.
    void SortLengthBuckets(BYTE rgBuckets[])
    {
        LIMITED_METHOD_CONTRACT;

        // This simplistic selection sort algorithm is probably the fastest for small values of
        // kLookupMapLengthEntries.
        _ASSERTE(kLookupMapLengthEntries < 10);

        // Iterate over every entry apart from the last two, moving the correct sorted value into each in
        // turn. Don't do the last value because it's already sorted and the second last because it'll be
        // sorted by the time we've done all the rest.
        for (DWORD i = 0; i < (kLookupMapLengthEntries - 2); i++)
        {
            BYTE bLowValue = rgBuckets[i];  // The lowest value we've seen so far
            DWORD dwLowIndex = i;           // The index which held that value

            // Look through the unsorted entries for the smallest.
            for (DWORD j = i + 1; j < (kLookupMapLengthEntries - 1); j++)
            {
                if (rgBuckets[j] < bLowValue)
                {
                    // Got a better candidate for smallest.
                    bLowValue = rgBuckets[j];
                    dwLowIndex = j;
                }
            }

            // If the original value at the current index wasn't the smallest, swap it with the one that was.
            if (dwLowIndex != i)
            {
                rgBuckets[dwLowIndex] = rgBuckets[i];
                rgBuckets[i] = bLowValue;
            }
        }

#ifdef _DEBUG
        // Check the table really is sorted.
        for (DWORD i = 1; i < kLookupMapLengthEntries; i++)
            _ASSERTE(rgBuckets[i] >= rgBuckets[i - 1]);
#endif // _DEBUG
    }

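    // For instance (a made-up input), { 12, 6, 8, 26 } sorts to { 6, 8, 12, 26 }; the final entry, which is
    // the maximum by construction, is never moved.
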
    // Given the histogram of the delta lengths and a prospective table of the subset of those lengths that
    // we'd utilize to encode the table, return the size (in bits) of the compressed table we'd get as a
    // result. The algorithm requires that the encoding length table is sorted (smallest to largest length).
    DWORD PredictCompressedSize(BYTE rgBuckets[])
    {
        LIMITED_METHOD_CONTRACT;

        DWORD cTotalBits = 0;

        // Iterate over each entry in the histogram (first entry is the number of deltas that can be encoded
        // in 1 bit, the second is the number of entries encodable in 2 bits etc.).
        for (DWORD i = 0; i < kBitsPerRVA; i++)
        {
            // Start by assuming that we can encode entries in this bucket with their exact length.
            DWORD cBits = i + 1;

            // Look through the encoding table to find the first (lowest) encoding length that can encode the
            // values for this bucket.
            for (DWORD j = 0; j < kLookupMapLengthEntries; j++)
            {
                if (cBits <= rgBuckets[j])
                {
                    // This is the best encoding we can do. Remember the real cost of all entries in this
                    // histogram bucket.
                    cBits = rgBuckets[j];
                    break;
                }
            }

            // Each entry for this histogram bucket costs a fixed size index into the encoding length table
            // (kLookupMapLengthBits), a single bit of delta sign plus the number of bits of delta magnitude
            // that we calculated above.
            cTotalBits += (kLookupMapLengthBits + 1 + cBits) * m_rgHistogram[i];
        }

        return cTotalBits;
    }
};

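// A worked example of the cost formula above (all counts are made up): with rgBuckets == { 6, 8, 12, 26 },
// 100 deltas of 5 bits each cost kLookupMapLengthBits + 1 + 6 bits (they round up to the 6-bit bucket),
// while 10 deltas of 20 bits each cost kLookupMapLengthBits + 1 + 26 bits (the first length >= 20 is 26).
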
// Allocate a special zap node that will compress the cold rid map associated with the given LookupMap.
void DataImage::StoreCompressedLayoutMap(LookupMapBase *pMap, ItemKind kind)
{
    STANDARD_VM_CONTRACT;

    ZapNode *pNode = new (GetHeap()) ZapCompressedLookupMap(this, pMap, static_cast<BYTE>(kind));

    AddStructureInOrder(pNode);
}

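// A hypothetical call site sketch (illustrative only; the particular map member is made up, and the real
// caller is LookupMapBase::Save, which also decides whether compression applies at all):
//
//   image->StoreCompressedLayoutMap(&m_TypeDefToMethodTableMap, DataImage::ITEM_COMPRESSED_MAP);
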
#endif // FEATURE_PREJIT