// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// --------------------------------------------------------------------------------

#include "pedecoder.h"

#ifdef FEATURE_LAZY_COW_PAGES

// We can't use the hosted ClrVirtualProtect, because EnsureWritablePages is called in places where we can't
// call into the host.

LONG* g_pCOWPageMap;   // one bit for each page in the virtual address space
LONG  g_COWPageMapMap; // one bit for each page of g_pCOWPageMap itself

LONG* EnsureCOWPageMapAllocated()
{
    if (g_pCOWPageMap == NULL)
    {
        // We store one bit for every page in the virtual address space. We may need to revisit this for 64-bit. :)
        MEMORYSTATUSEX stats;
        stats.dwLength = sizeof(stats);
        if (GlobalMemoryStatusEx(&stats))
        {
            _ASSERTE(stats.ullTotalVirtual < 0x100000000ULL);

            SIZE_T mapSize = (SIZE_T)((stats.ullTotalVirtual / GetOsPageSize()) / 8);
            _ASSERTE(mapSize / GetOsPageSize() <= 32); // g_COWPageMapMap can only track 32 pages

            // Note that VirtualAlloc will zero-fill the pages for us.
            LONG* pMap = (LONG*)VirtualAlloc(NULL, mapSize, MEM_RESERVE, PAGE_READWRITE);
            if (pMap != NULL)
            {
                // If another thread published the map first, release ours and use theirs.
                if (NULL != InterlockedCompareExchangeT(&g_pCOWPageMap, pMap, NULL))
                    VirtualFree(pMap, 0, MEM_RELEASE);
            }
        }
    }

    return g_pCOWPageMap;
}

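// Sizing sketch for the map above, assuming 4 KB OS pages: a 4 GB virtual address space
// has 0x100000000 / 0x1000 = 1,048,576 pages, so the map needs 1,048,576 / 8 = 131,072
// bytes, i.e. 131,072 / 4,096 = 32 pages -- exactly the 32 pages g_COWPageMapMap can track.
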
bool EnsureCOWPageMapElementAllocated(LONG* elem)
{
    _ASSERTE(elem >= g_pCOWPageMap);
    _ASSERTE(g_pCOWPageMap != NULL);

    size_t offset = (size_t)elem - (size_t)g_pCOWPageMap;
    size_t page = offset / GetOsPageSize();
    _ASSERTE(page < 32); // g_COWPageMapMap has only 32 bits
    int bit = (int)(1 << page);

    if (!(g_COWPageMapMap & bit))
    {
        // Commit the page of the map that holds this element, then record that it's committed.
        if (!VirtualAlloc(elem, 1, MEM_COMMIT, PAGE_READWRITE))
            return false;
        InterlockedOr(&g_COWPageMapMap, bit);
    }

    return true;
}

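// Coverage sketch, assuming 4 KB OS pages: each committed page of the map holds
// 4,096 * 8 = 32,768 bits, and each bit tracks one 4 KB page of address space, so a single
// committed map page describes 32,768 * 4 KB = 128 MB of virtual address space.
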
bool IsCOWPageMapElementAllocated(LONG* elem)
{
    _ASSERTE(elem >= g_pCOWPageMap);
    _ASSERTE(g_pCOWPageMap != NULL);

    size_t offset = (size_t)elem - (size_t)g_pCOWPageMap;
    size_t page = offset / GetOsPageSize();
    _ASSERTE(page < 32); // g_COWPageMapMap has only 32 bits
    int bit = (int)(1 << page);

    return (g_COWPageMapMap & bit) != 0;
}

bool SetCOWPageBits(BYTE* pStart, size_t len, bool value)
{
    _ASSERTE(g_pCOWPageMap != NULL);

    // we don't need a barrier here, since:
    // a) all supported hardware maintains ordering of dependent reads
    // b) it's ok if additional reads happen, because this never changes
    LONG* pCOWPageMap = g_pCOWPageMap;

    // Write the bits in 32-bit chunks, to avoid doing one interlocked instruction for each bit.
    LONG bits = 0;
    size_t page = (size_t)pStart / GetOsPageSize();
    size_t lastPage = (size_t)(pStart+len-1) / GetOsPageSize();
    size_t elem = page / 32;
    do
    {
        bits |= 1 << (page % 32);
        ++page;

        // if we've moved to a new element of the map, or we've covered every page,
        // we need to write out the already-accumulated element.
        size_t newElem = page / 32;
        if (page > lastPage || newElem != elem)
        {
            LONG* pElem = &pCOWPageMap[elem];
            if (!EnsureCOWPageMapElementAllocated(pElem))
                return false;

            if (value)
                InterlockedOr(&pCOWPageMap[elem], bits);
            else
                InterlockedAnd(&pCOWPageMap[elem], ~bits);

            bits = 0;
            elem = newElem;
        }
    }
    while (page <= lastPage);

    return true;
}

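// Worked example of the chunking above: marking pages 30, 31, and 32 accumulates bits 30
// and 31 for map element 0 and bit 0 for element 1, so the loop issues two interlocked
// operations for the range rather than one per page.
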
bool SetCOWPageBitsForImage(PEDecoder * pImage, bool value)
{
    if (!pImage->HasNativeHeader())
        return true;

    IMAGE_SECTION_HEADER* pSection;
    BYTE* pStart;
    size_t len;

    pSection = pImage->FindSection(".data");
    if (pSection != NULL)
    {
        pStart = (BYTE*) dac_cast<TADDR>(pImage->GetBase()) + pSection->VirtualAddress;
        len = pSection->Misc.VirtualSize;
        if (!SetCOWPageBits(pStart, len, value))
            return false;
    }

    pSection = pImage->FindSection(".xdata");
    if (pSection != NULL)
    {
        pStart = (BYTE*) dac_cast<TADDR>(pImage->GetBase()) + pSection->VirtualAddress;
        len = pSection->Misc.VirtualSize;
        if (!SetCOWPageBits(pStart, len, value))
            return false;
    }

    return true;
}

bool AreAnyCOWPageBitsSet(BYTE* pStart, size_t len)
{
    LONG* pCOWPageMap = g_pCOWPageMap;
    if (pCOWPageMap == NULL)
        return false; // no image has been registered yet, so no bits can be set

    size_t page = (size_t)pStart / GetOsPageSize();
    size_t lastPage = (size_t)(pStart+len-1) / GetOsPageSize();
    do
    {
        LONG* pElem = &pCOWPageMap[page / 32];
        if (IsCOWPageMapElementAllocated(pElem) &&
            (*pElem & (1 << (page % 32))))
        {
            return true;
        }
        ++page;
    }
    while (page <= lastPage);

    return false;
}

void AllocateLazyCOWPages(PEDecoder * pImage)
{
    // Note: it's ok to call AllocateLazyCOWPages multiple times for the same loaded image.
    // This will result in any already-writable pages being incorrectly marked as read-only
    // in our records, but that just means we'll call VirtualProtect one more time.
    //
    // However, FreeLazyCOWPages must be called just once per loaded image, when it is actually
    // unloaded. Otherwise we will lose track of the COW pages in that image while it might
    // still be accessible.

    if (!EnsureCOWPageMapAllocated())
        ThrowOutOfMemory();

    if (!SetCOWPageBitsForImage(pImage, true))
        ThrowOutOfMemory();
}

void FreeLazyCOWPages(PEDecoder * pImage)
{
    // Must be called just once per image; see the note in AllocateLazyCOWPages.
    SetCOWPageBitsForImage(pImage, false);
}

bool IsInReadOnlyLazyCOWPage(void* p)
{
    return AreAnyCOWPageBitsSet((BYTE*)p, 1);
}

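// Illustrative sketch (hypothetical names, not from this file): access-violation handling code
// can consult IsInReadOnlyLazyCOWPage to learn whether the faulting address lies in a page
// this scheme deliberately left read-only, rather than in genuinely bad memory.
//
//   bool IsFaultOnLazyCOWPage(EXCEPTION_RECORD* pRecord)   // hypothetical helper
//   {
//       // For an access violation, ExceptionInformation[1] is the inaccessible address.
//       void* target = (void*)pRecord->ExceptionInformation[1];
//       return IsInReadOnlyLazyCOWPage(target);
//   }
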
bool MakeWritable(BYTE* pStart, size_t len, DWORD protect)
{
    DWORD oldProtect;
    if (!VirtualProtect(pStart, len, protect, &oldProtect))
        return false;

    INDEBUG(bool result = ) SetCOWPageBits(pStart, len, false);
    _ASSERTE(result); // we already set these, so we must be able to clear them.
    return true;
}

bool EnsureWritablePagesNoThrow(void* p, size_t len)
{
    BYTE* pStart = (BYTE*)p;
    if (len == 0)
        return true;
    if (!AreAnyCOWPageBitsSet(pStart, len))
        return true;
    return MakeWritable(pStart, len, PAGE_READWRITE);
}

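// Illustrative usage, assuming pData points into a tracked image's .data section
// (hypothetical variable): callers that cannot throw check the result and propagate failure.
//
//   if (!EnsureWritablePagesNoThrow(pData, sizeof(*pData)))
//       return E_OUTOFMEMORY;
//   *pData = newValue;
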
void EnsureWritablePages(void* p, size_t len)
{
    BYTE* pStart = (BYTE*)p;
    if (len == 0)
        return;
    if (!AreAnyCOWPageBitsSet(pStart, len))
        return;
    if (!MakeWritable(pStart, len, PAGE_READWRITE))
        ThrowOutOfMemory();
}

bool EnsureWritableExecutablePagesNoThrow(void* p, size_t len)
{
    BYTE* pStart = (BYTE*) p;
    if (len == 0)
        return true;
    if (!AreAnyCOWPageBitsSet(pStart, len))
        return true;
    return MakeWritable(pStart, len, PAGE_EXECUTE_READWRITE);
}

void EnsureWritableExecutablePages(void* p, size_t len)
{
    BYTE* pStart = (BYTE*) p;
    if (len == 0)
        return;
    if (!AreAnyCOWPageBitsSet(pStart, len))
        return;
    if (!MakeWritable(pStart, len, PAGE_EXECUTE_READWRITE))
        ThrowOutOfMemory();
}

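// Illustrative usage, assuming pCode points at code bytes in a tracked image and cbPatch is
// the number of bytes to modify (both hypothetical): the executable variant is the one to use
// before patching code, since it requests PAGE_EXECUTE_READWRITE rather than PAGE_READWRITE.
//
//   EnsureWritableExecutablePages(pCode, cbPatch);   // throws on failure
//   memcpy(pCode, patchBytes, cbPatch);
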
#endif // FEATURE_LAZY_COW_PAGES