Merge pull request #11077 from pgavlin/gh10940
[platform/upstream/coreclr.git] / src/gc/softwarewritewatch.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#include "common.h"
#include "gcenv.h"
#include "env/gcenv.os.h"
#include "softwarewritewatch.h"

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifndef DACCESS_COMPILE

static_assert((static_cast<size_t>(1) << SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift) == OS_PAGE_SIZE, "Unexpected OS_PAGE_SIZE");

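// The software write watch table holds one byte per OS page of the GC heap (see the static_assert
// above): a byte is 0xff when its page has been written since the dirty state was last cleared, and 0
// otherwise. g_gc_sw_ww_enabled_for_gc_heap indicates whether software write watch is currently enabled
// for the GC heap; StaticClose() below turns it off and drops the table pointer.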
extern "C"
{
    uint8_t *g_gc_sw_ww_table = nullptr;
    bool g_gc_sw_ww_enabled_for_gc_heap = false;
}

void SoftwareWriteWatch::StaticClose()
{
    if (GetTable() == nullptr)
    {
        return;
    }

    g_gc_sw_ww_enabled_for_gc_heap = false;
    g_gc_sw_ww_table = nullptr;
}

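// Scans one size_t-aligned, size_t-sized "block" of the write watch table (covering sizeof(size_t)
// consecutive pages) and appends the addresses of the dirty pages it finds to dirtyPages, starting at
// *dirtyPageIndexRef. Only table bytes in [startByteIndex, endByteIndex) are examined. Returns false if
// the dirtyPages buffer fills up before the block is fully processed, true otherwise.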
bool SoftwareWriteWatch::GetDirtyFromBlock(
    uint8_t *block,
    uint8_t *firstPageAddressInBlock,
    size_t startByteIndex,
    size_t endByteIndex,
    void **dirtyPages,
    size_t *dirtyPageIndexRef,
    size_t dirtyPageCount,
    bool clearDirty)
{
    assert(block != nullptr);
    assert(ALIGN_DOWN(block, sizeof(size_t)) == block);
    assert(firstPageAddressInBlock == reinterpret_cast<uint8_t *>(GetPageAddress(block - GetTable())));
    assert(startByteIndex < endByteIndex);
    assert(endByteIndex <= sizeof(size_t));
    assert(dirtyPages != nullptr);
    assert(dirtyPageIndexRef != nullptr);

    size_t &dirtyPageIndex = *dirtyPageIndexRef;
    assert(dirtyPageIndex < dirtyPageCount);

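    // Read the whole block at once; each byte of it is the dirty flag for one page. A zero block means
    // none of the covered pages are dirty.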
    size_t dirtyBytes = *reinterpret_cast<size_t *>(block);
    if (dirtyBytes == 0)
    {
        return true;
    }

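    // Mask off table bytes outside the requested [startByteIndex, endByteIndex) range so that only
    // pages inside the caller's region are reported.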
    if (startByteIndex != 0)
    {
        size_t numLowBitsToClear = startByteIndex * 8;
        dirtyBytes >>= numLowBitsToClear;
        dirtyBytes <<= numLowBitsToClear;
    }
    if (endByteIndex != sizeof(size_t))
    {
        size_t numHighBitsToClear = (sizeof(size_t) - endByteIndex) * 8;
        dirtyBytes <<= numHighBitsToClear;
        dirtyBytes >>= numHighBitsToClear;
    }

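    // Record one dirty page per iteration: the lowest set bit identifies the lowest remaining dirty
    // table byte.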
    while (dirtyBytes != 0)
    {
        DWORD bitIndex;
        static_assert(sizeof(size_t) <= 8, "Unexpected sizeof(size_t)");
        if (sizeof(size_t) == 8)
        {
            BitScanForward64(&bitIndex, static_cast<DWORD64>(dirtyBytes));
        }
        else
        {
            BitScanForward(&bitIndex, static_cast<DWORD>(dirtyBytes));
        }

        // Each byte is only ever set to 0 or 0xff
        assert(bitIndex % 8 == 0);
        size_t byteMask = static_cast<size_t>(0xff) << bitIndex;
        assert((dirtyBytes & byteMask) == byteMask);
        dirtyBytes ^= byteMask;

        DWORD byteIndex = bitIndex / 8;
        if (clearDirty)
        {
            // Clear only the bytes for which pages are recorded as dirty
            block[byteIndex] = 0;
        }

        void *pageAddress = firstPageAddressInBlock + byteIndex * OS_PAGE_SIZE;
        assert(pageAddress >= GetHeapStartAddress());
        assert(pageAddress < GetHeapEndAddress());
        assert(dirtyPageIndex < dirtyPageCount);
        dirtyPages[dirtyPageIndex] = pageAddress;
        ++dirtyPageIndex;
        if (dirtyPageIndex == dirtyPageCount)
        {
            return false;
        }
    }
    return true;
}

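// Collects the addresses of pages in [baseAddress, baseAddress + regionByteSize) that have been written
// to since their dirty state was last cleared. On entry, *dirtyPageCountRef is the capacity of the
// dirtyPages buffer; on return it holds the number of page addresses actually recorded (when the buffer
// fills, it is left at the capacity and more dirty pages may remain). When clearDirty is true, each
// reported page's dirty state is reset as it is recorded.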
void SoftwareWriteWatch::GetDirty(
    void *baseAddress,
    size_t regionByteSize,
    void **dirtyPages,
    size_t *dirtyPageCountRef,
    bool clearDirty,
    bool isRuntimeSuspended)
{
    VerifyCreated();
    VerifyMemoryRegion(baseAddress, regionByteSize);
    assert(dirtyPages != nullptr);
    assert(dirtyPageCountRef != nullptr);

    size_t dirtyPageCount = *dirtyPageCountRef;
    if (dirtyPageCount == 0)
    {
        return;
    }

    if (!isRuntimeSuspended)
    {
        // When a page is marked as dirty, a memory barrier is not issued after the write most of the time. Issue a memory
        // barrier on all active threads of the process now to make recent changes to dirty state visible to this thread.
        GCToOSInterface::FlushProcessWriteBuffers();
    }

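    // Map the heap region onto the corresponding byte range of the write watch table, then widen that
    // range to size_t-aligned block boundaries. The region may begin and/or end partway through a block,
    // so the leading and trailing partial blocks are handled separately from the full blocks in between.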
    uint8_t *tableRegionStart;
    size_t tableRegionByteSize;
    TranslateToTableRegion(baseAddress, regionByteSize, &tableRegionStart, &tableRegionByteSize);
    uint8_t *tableRegionEnd = tableRegionStart + tableRegionByteSize;

    uint8_t *blockStart = ALIGN_DOWN(tableRegionStart, sizeof(size_t));
    assert(blockStart >= GetUntranslatedTable());
    uint8_t *blockEnd = ALIGN_UP(tableRegionEnd, sizeof(size_t));
    assert(blockEnd <= GetUntranslatedTableEnd());
    uint8_t *fullBlockEnd = ALIGN_DOWN(tableRegionEnd, sizeof(size_t));

    size_t dirtyPageIndex = 0;
    uint8_t *currentBlock = blockStart;
    uint8_t *firstPageAddressInCurrentBlock = reinterpret_cast<uint8_t *>(GetPageAddress(currentBlock - GetTable()));

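    // do { ... } while (false) provides structured early exit: a break abandons the scan once the
    // dirtyPages buffer is full. *dirtyPageCountRef is rewritten only when the whole region was scanned;
    // when the buffer fills, dirtyPageIndex already equals the incoming capacity, so the caller still
    // sees the correct count.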
    do
    {
        if (blockStart == fullBlockEnd)
        {
            if (GetDirtyFromBlock(
                    currentBlock,
                    firstPageAddressInCurrentBlock,
                    tableRegionStart - blockStart,
                    tableRegionEnd - fullBlockEnd,
                    dirtyPages,
                    &dirtyPageIndex,
                    dirtyPageCount,
                    clearDirty))
            {
                *dirtyPageCountRef = dirtyPageIndex;
            }
            break;
        }

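        // The region starts partway through the first block; scan only the tail of that block.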
        if (tableRegionStart != blockStart)
        {
            if (!GetDirtyFromBlock(
                    currentBlock,
                    firstPageAddressInCurrentBlock,
                    tableRegionStart - blockStart,
                    sizeof(size_t),
                    dirtyPages,
                    &dirtyPageIndex,
                    dirtyPageCount,
                    clearDirty))
            {
                break;
            }
            currentBlock += sizeof(size_t);
            firstPageAddressInCurrentBlock += sizeof(size_t) * OS_PAGE_SIZE;
        }

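        // Scan the fully-covered blocks. The break inside this loop only exits the loop itself; the
        // check that follows detects the early exit and breaks out of the enclosing do-block.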
        while (currentBlock < fullBlockEnd)
        {
            if (!GetDirtyFromBlock(
                    currentBlock,
                    firstPageAddressInCurrentBlock,
                    0,
                    sizeof(size_t),
                    dirtyPages,
                    &dirtyPageIndex,
                    dirtyPageCount,
                    clearDirty))
            {
                break;
            }
            currentBlock += sizeof(size_t);
            firstPageAddressInCurrentBlock += sizeof(size_t) * OS_PAGE_SIZE;
        }
        if (currentBlock < fullBlockEnd)
        {
            break;
        }

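        // If the region does not end on a block boundary, scan the head of the trailing partial block.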
        if (tableRegionEnd != fullBlockEnd &&
            !GetDirtyFromBlock(
                currentBlock,
                firstPageAddressInCurrentBlock,
                0,
                tableRegionEnd - fullBlockEnd,
                dirtyPages,
                &dirtyPageIndex,
                dirtyPageCount,
                clearDirty))
        {
            break;
        }

        *dirtyPageCountRef = dirtyPageIndex;
    } while (false);

    if (!isRuntimeSuspended && clearDirty && dirtyPageIndex != 0)
    {
        // When dirtying a page, the dirty state of the page is first checked to see if the page is already dirty. If already
        // dirty, the write to mark it as dirty is skipped. So, when the dirty state of a page is cleared, we need to make sure
        // the cleared state is visible to other threads that may dirty the page, before marking through objects in the page, so
        // that the GC will not miss marking through dirtied objects in the page. Issue a memory barrier on all active threads
        // of the process now.
        MemoryBarrier(); // flush writes from this thread first to guarantee ordering
        GCToOSInterface::FlushProcessWriteBuffers();
    }
}

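// Illustrative only: a minimal sketch of how a caller might consume GetDirty. The buffer size and the
// clearDirty/isRuntimeSuspended arguments below are assumptions for illustration, not the GC's actual
// calling code.
//
//     void *dirtyPages[256];               // hypothetical output buffer
//     size_t dirtyPageCount = 256;         // in: buffer capacity
//     SoftwareWriteWatch::GetDirty(regionStart, regionByteSize, dirtyPages, &dirtyPageCount,
//                                  true /* clearDirty */, false /* isRuntimeSuspended */);
//     // out: dirtyPageCount is the number of dirty page addresses written to dirtyPages. If it equals
//     // the capacity, the buffer may have filled; another call can pick up any remaining dirty pages.
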
#endif // !DACCESS_COMPILE
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP