1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
7 #include "env/gcenv.os.h"
8 #include "softwarewritewatch.h"
10 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
11 #ifndef DACCESS_COMPILE
// The table maps one byte per OS page of the GC heap (see the byte-index shift
// below), so the shift must exactly invert OS_PAGE_SIZE.
13 static_assert((static_cast<size_t>(1) << SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift) == OS_PAGE_SIZE, "Unexpected OS_PAGE_SIZE");
// Pointer to the software write watch table: one byte per GC-heap page, set
// nonzero when the corresponding page has been written to. Null when the
// mechanism is closed/uninitialized (see StaticClose below).
17 uint8_t *g_gc_sw_ww_table = nullptr;
// True while software write watch is actively tracking writes to the GC heap.
18 bool g_gc_sw_ww_enabled_for_gc_heap = false;
// Tears down software write watch for the GC heap: disables tracking and
// forgets the table pointer. No-op if the table was never set up.
// NOTE(review): lines are elided in this listing — the body of the null-table
// early-out (presumably a `return`) and the closing braces are not visible here.
21 void SoftwareWriteWatch::StaticClose()
23     if (GetTable() == nullptr)
    // Disable tracking first, then drop the table pointer.
28     g_gc_sw_ww_enabled_for_gc_heap = false;
29     g_gc_sw_ww_table = nullptr;
// Scans one size_t-sized block of the write watch table and records the dirty
// pages it describes into the caller's output array.
//
// Each byte of the table covers one OS page and is only ever 0 (clean) or 0xff
// (dirty), so the block can be scanned a size_t at a time and each set bit run
// located with a bit scan (the hit is always byte-aligned — asserted below).
//
// Visible parameters (others — `block`, `endByteIndex`, the output array
// `dirtyPages`, and a `clearDirty` flag — are on lines elided from this listing):
//   firstPageAddressInBlock - heap address of the page covered by byte 0 of the block
//   startByteIndex          - first table byte within the block to examine (inclusive)
//   dirtyPageIndexRef       - in/out cursor into dirtyPages; advanced per recorded page
//   dirtyPageCount          - capacity of dirtyPages; scan stops when reached
// Returns false (per the callers in GetDirty) when the output array filled up
// before the block was fully consumed — TODO confirm against the elided return
// statements.
32 bool SoftwareWriteWatch::GetDirtyFromBlock(
34     uint8_t *firstPageAddressInBlock,
35     size_t startByteIndex,
38     size_t *dirtyPageIndexRef,
39     size_t dirtyPageCount,
42     assert(block != nullptr);
43     assert(ALIGN_DOWN(block, sizeof(size_t)) == block);
44     assert(firstPageAddressInBlock == reinterpret_cast<uint8_t *>(GetPageAddress(block - GetTable())));
45     assert(startByteIndex < endByteIndex);
46     assert(endByteIndex <= sizeof(size_t));
47     assert(dirtyPages != nullptr);
48     assert(dirtyPageIndexRef != nullptr);
50     size_t &dirtyPageIndex = *dirtyPageIndexRef;
51     assert(dirtyPageIndex < dirtyPageCount);
    // Read the whole block (sizeof(size_t) table bytes) in one load.
53     size_t dirtyBytes = *reinterpret_cast<size_t *>(block);
    // Mask off table bytes outside [startByteIndex, endByteIndex): shift out the
    // low bytes before the start, then the high bytes past the end. This assumes
    // little-endian byte order (byte i of the table is the low-order byte i of
    // the size_t) — TODO confirm; the full file likely documents this.
59     if (startByteIndex != 0)
61         size_t numLowBitsToClear = startByteIndex * 8;
62         dirtyBytes >>= numLowBitsToClear;
63         dirtyBytes <<= numLowBitsToClear;
65     if (endByteIndex != sizeof(size_t))
67         size_t numHighBitsToClear = (sizeof(size_t) - endByteIndex) * 8;
68         dirtyBytes <<= numHighBitsToClear;
69         dirtyBytes >>= numHighBitsToClear;
    // Consume one dirty byte per iteration, lowest-addressed first.
72     while (dirtyBytes != 0)
        // Pick the 64- or 32-bit bit scan to match the size_t width.
75         static_assert(sizeof(size_t) <= 8, "Unexpected sizeof(size_t)");
76         if (sizeof(size_t) == 8)
78             BitScanForward64(&bitIndex, static_cast<DWORD64>(dirtyBytes));
82             BitScanForward(&bitIndex, static_cast<DWORD>(dirtyBytes));
85         // Each byte is only ever set to 0 or 0xff
86         assert(bitIndex % 8 == 0);
        // Clear the whole dirty byte from our local copy so the loop advances.
87         size_t byteMask = static_cast<size_t>(0xff) << bitIndex;
88         assert((dirtyBytes & byteMask) == byteMask);
89         dirtyBytes ^= byteMask;
    // Table byte index within the block -> page index within the block.
91         DWORD byteIndex = bitIndex / 8;
94         // Clear only the bytes for which pages are recorded as dirty
    // Record the heap page covered by this table byte.
98         void *pageAddress = firstPageAddressInBlock + byteIndex * OS_PAGE_SIZE;
99         assert(pageAddress >= GetHeapStartAddress());
100         assert(pageAddress < GetHeapEndAddress());
101         assert(dirtyPageIndex < dirtyPageCount);
102         dirtyPages[dirtyPageIndex] = pageAddress;
        // Output array is full; the early-exit body is elided from this listing.
104         if (dirtyPageIndex == dirtyPageCount)
// Collects the set of dirty GC-heap pages within [baseAddress,
// baseAddress + regionByteSize) into `dirtyPages`, up to *dirtyPageCountRef
// entries, writing the number found back through dirtyPageCountRef.
//
// The table region corresponding to the heap region is walked a size_t-sized
// block at a time via GetDirtyFromBlock: a possibly-partial leading block, the
// run of full blocks, then a possibly-partial trailing block. (Several argument
// lines and braces of those calls are elided from this listing.)
//
// isRuntimeSuspended governs the cross-thread memory-barrier protocol: when
// other threads may be running, FlushProcessWriteBuffers() is used before the
// scan (to observe their recent dirtying writes) and after clearing dirty
// state (so they re-dirty pages we are about to mark through).
112 void SoftwareWriteWatch::GetDirty(
114     size_t regionByteSize,
116     size_t *dirtyPageCountRef,
118     bool isRuntimeSuspended)
121     VerifyMemoryRegion(baseAddress, regionByteSize);
122     assert(dirtyPages != nullptr);
123     assert(dirtyPageCountRef != nullptr);
    // Capacity supplied by the caller; zero capacity means nothing to do.
125     size_t dirtyPageCount = *dirtyPageCountRef;
126     if (dirtyPageCount == 0)
131     if (!isRuntimeSuspended)
133         // When a page is marked as dirty, a memory barrier is not issued after the write most of the time. Issue a memory
134         // barrier on all active threads of the process now to make recent changes to dirty state visible to this thread.
135         GCToOSInterface::FlushProcessWriteBuffers();
    // Map the heap region onto its span of write watch table bytes.
138     uint8_t *tableRegionStart;
139     size_t tableRegionByteSize;
140     TranslateToTableRegion(baseAddress, regionByteSize, &tableRegionStart, &tableRegionByteSize);
141     uint8_t *tableRegionEnd = tableRegionStart + tableRegionByteSize;
    // Block-align the span: blockStart/blockEnd enclose it, fullBlockEnd marks
    // where the last whole size_t block ends (a trailing partial may follow).
143     uint8_t *blockStart = ALIGN_DOWN(tableRegionStart, sizeof(size_t));
144     assert(blockStart >= GetUntranslatedTable());
145     uint8_t *blockEnd = ALIGN_UP(tableRegionEnd, sizeof(size_t));
146     assert(blockEnd <= GetUntranslatedTableEnd());
147     uint8_t *fullBlockEnd = ALIGN_DOWN(tableRegionEnd, sizeof(size_t));
149     size_t dirtyPageIndex = 0;
150     uint8_t *currentBlock = blockStart;
151     uint8_t *firstPageAddressInCurrentBlock = reinterpret_cast<uint8_t *>(GetPageAddress(currentBlock - GetTable()));
    // Whole region fits inside a single (partial) block: scan it with both the
    // leading and trailing byte bounds and finish.
155     if (blockStart == fullBlockEnd)
157         if (GetDirtyFromBlock(
159                 firstPageAddressInCurrentBlock,
160                 tableRegionStart - blockStart,
161                 tableRegionEnd - fullBlockEnd,
167         *dirtyPageCountRef = dirtyPageIndex;
    // Leading partial block, if the region does not start block-aligned.
172     if (tableRegionStart != blockStart)
174         if (!GetDirtyFromBlock(
176                 firstPageAddressInCurrentBlock,
177                 tableRegionStart - blockStart,
    // Advance one block: sizeof(size_t) table bytes == that many heap pages.
186         currentBlock += sizeof(size_t);
187         firstPageAddressInCurrentBlock += sizeof(size_t) * OS_PAGE_SIZE;
    // Full interior blocks.
190     while (currentBlock < fullBlockEnd)
192         if (!GetDirtyFromBlock(
194                 firstPageAddressInCurrentBlock,
204         currentBlock += sizeof(size_t);
205         firstPageAddressInCurrentBlock += sizeof(size_t) * OS_PAGE_SIZE;
    // GetDirtyFromBlock returned false mid-run (output array filled) — the
    // early-exit body here is elided from this listing.
207     if (currentBlock < fullBlockEnd)
    // Trailing partial block, if the region does not end block-aligned.
212     if (tableRegionEnd != fullBlockEnd &&
215             firstPageAddressInCurrentBlock,
217             tableRegionEnd - fullBlockEnd,
    // Report how many dirty pages were recorded.
226     *dirtyPageCountRef = dirtyPageIndex;
229     if (!isRuntimeSuspended && clearDirty && dirtyPageIndex != 0)
231         // When dirtying a page, the dirty state of the page is first checked to see if the page is already dirty. If already
232         // dirty, the write to mark it as dirty is skipped. So, when the dirty state of a page is cleared, we need to make sure
233         // the cleared state is visible to other threads that may dirty the page, before marking through objects in the page, so
234         // that the GC will not miss marking through dirtied objects in the page. Issue a memory barrier on all active threads
235         // of the process now.
236         MemoryBarrier(); // flush writes from this thread first to guarantee ordering
237         GCToOSInterface::FlushProcessWriteBuffers();
241 #endif // !DACCESS_COMPILE
242 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP