// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

// File: VirtualCallStub.CPP
//
// This file contains the virtual call stub manager and caches
//
// ============================================================================

#ifdef FEATURE_PERFMAP

#ifndef DACCESS_COMPILE

//@TODO: make these conditional on whether logs are being produced
//instrumentation counters
UINT32 g_site_counter = 0;              //# of call sites
UINT32 g_site_write = 0;                //# of call site backpatch writes
UINT32 g_site_write_poly = 0;           //# of call site backpatch writes to point to resolve stubs
UINT32 g_site_write_mono = 0;           //# of call site backpatch writes to point to dispatch stubs

UINT32 g_stub_lookup_counter = 0;       //# of lookup stubs
UINT32 g_stub_mono_counter = 0;         //# of dispatch stubs
UINT32 g_stub_poly_counter = 0;         //# of resolve stubs
UINT32 g_stub_vtable_counter = 0;       //# of vtable call stubs
UINT32 g_stub_space = 0;                //# of bytes of stubs
UINT32 g_reclaim_counter = 0;           //# of times a ReclaimAll was performed

UINT32 g_worker_call = 0;               //# of calls into ResolveWorker
UINT32 g_worker_call_no_patch = 0;
UINT32 g_worker_collide_to_mono = 0;    //# of times we converted a poly stub to a mono stub instead of writing the cache entry

UINT32 g_external_call = 0;             //# of calls into GetTarget(token, pMT)
UINT32 g_external_call_no_patch = 0;

UINT32 g_insert_cache_external = 0;     //# of times Insert was called for IK_EXTERNAL
UINT32 g_insert_cache_shared = 0;       //# of times Insert was called for IK_SHARED
UINT32 g_insert_cache_dispatch = 0;     //# of times Insert was called for IK_DISPATCH
UINT32 g_insert_cache_resolve = 0;      //# of times Insert was called for IK_RESOLVE
UINT32 g_insert_cache_hit = 0;          //# of times Insert found an empty cache entry
UINT32 g_insert_cache_miss = 0;         //# of times Insert already had a matching cache entry
UINT32 g_insert_cache_collide = 0;      //# of times Insert found a used cache entry
UINT32 g_insert_cache_write = 0;        //# of times Insert wrote a cache entry

UINT32 g_cache_entry_counter = 0;       //# of cache structs
UINT32 g_cache_entry_space = 0;         //# of bytes used by cache lookup structs

UINT32 g_call_lookup_counter = 0;       //# of times lookup stubs entered

UINT32 g_mono_call_counter = 0;         //# of times dispatch stubs entered
UINT32 g_mono_miss_counter = 0;         //# of times expected MT did not match actual MT (dispatch stubs)

UINT32 g_poly_call_counter = 0;         //# of times resolve stubs entered
UINT32 g_poly_miss_counter = 0;         //# of times cache missed (resolve stub)

UINT32 g_chained_lookup_call_counter = 0;           //# of hits in a chained lookup
UINT32 g_chained_lookup_miss_counter = 0;           //# of misses in a chained lookup

UINT32 g_chained_lookup_external_call_counter = 0;  //# of hits in an external chained lookup
UINT32 g_chained_lookup_external_miss_counter = 0;  //# of misses in an external chained lookup

UINT32 g_chained_entry_promoted = 0;    //# of times a cache entry is promoted to the start of the chain

UINT32 g_bucket_space = 0;              //# of bytes in caches and tables, not including the stubs themselves
UINT32 g_bucket_space_dead = 0;         //# of bytes of abandoned buckets not yet recycled

#endif // !DACCESS_COMPILE
// This is the number of times a successful chain lookup will occur before the
// entry is promoted to the front of the chain. This is declared as extern because
// the default value (CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT) is defined in the header.
extern "C" size_t g_dispatch_cache_chain_success_counter;
#include "virtualcallstub.h"

#include "profilepriv.h"
#include "contractimpl.h"
#include "dynamicinterfacecastable.h"

SPTR_IMPL_INIT(VirtualCallStubManagerManager, VirtualCallStubManagerManager, g_pManager, NULL);

#ifndef DACCESS_COMPILE

UINT32 STUB_MISS_COUNT_VALUE = 100;
UINT32 STUB_COLLIDE_WRITE_PCT = 100;
UINT32 STUB_COLLIDE_MONO_PCT = 0;
#endif // STUB_LOGGING

FastTable::NumCallStubs_t FastTable::NumCallStubs;

FastTable* BucketTable::dead = NULL;    //linked list of the abandoned buckets

DispatchCache *g_resolveCache = NULL;   //cache of dispatch stubs for in line lookup by resolve stubs.

size_t g_dispatch_cache_chain_success_counter = CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT;
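// Consumed by the chain-lookup fast path (see ResolveWorkerChainLookupAsmStub,
// referenced below): each successful chained cache hit counts this down, and when
// it is exhausted the matching entry is promoted to the front of its chain and
// the counter is reset to its initial value.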
UINT32 g_resetCacheCounter;
UINT32 g_resetCacheIncr;
UINT32 g_dumpLogCounter;
UINT32 g_dumpLogIncr;
#endif // STUB_LOGGING

//@TODO: use the existing logging mechanisms. For now we write to a file.
HANDLE g_hStubLogFile;
void VirtualCallStubManager::StartupLogging()
    FAULT_NOT_FATAL(); // We handle file creation problems locally

    str.Printf("StubLog_%d.log", GetCurrentProcessId());
    g_hStubLogFile = WszCreateFile (str.GetUnicode(),
                                    FILE_ATTRIBUTE_NORMAL,
    EX_END_CATCH(SwallowAllExceptions)

    if (g_hStubLogFile == INVALID_HANDLE_VALUE) {
        g_hStubLogFile = NULL;
#define OUTPUT_FORMAT_INT "\t%-30s %d\r\n"
#define OUTPUT_FORMAT_SIZE "\t%-30s %zu\r\n"
#define OUTPUT_FORMAT_PCT "\t%-30s %#5.2f%%\r\n"
#define OUTPUT_FORMAT_INT_PCT "\t%-30s %5d (%#5.2f%%)\r\n"
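// Each OUTPUT_FORMAT_* macro pairs a left-justified label with one value
// (plus a percentage for OUTPUT_FORMAT_INT_PCT), e.g.:
//   sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", g_site_counter);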
void VirtualCallStubManager::LoggingDump()
    VirtualCallStubManagerIterator it =
        VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();

        it.Current()->LogStats();

    g_resolveCache->LogStats();

    // Temp space to use for formatting the output.
    static const int FMT_STR_SIZE = 160;
    char szPrintStr[FMT_STR_SIZE];
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub tuning parameters\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "STUB_MISS_COUNT_VALUE",
            STUB_MISS_COUNT_VALUE, STUB_MISS_COUNT_VALUE);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_WRITE_PCT",
            STUB_COLLIDE_WRITE_PCT, STUB_COLLIDE_WRITE_PCT);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_MONO_PCT",
            STUB_COLLIDE_MONO_PCT, STUB_COLLIDE_MONO_PCT);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "DumpLogCounter",
            g_dumpLogCounter, g_dumpLogCounter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "DumpLogIncr",
            g_dumpLogIncr, g_dumpLogIncr);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "ResetCacheCounter",
            g_resetCacheCounter, g_resetCacheCounter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "ResetCacheIncr",
            g_resetCacheIncr, g_resetCacheIncr);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
#endif // STUB_LOGGING
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nsite data\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", g_site_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write", g_site_write);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", g_site_write_mono);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", g_site_write_poly);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %d\r\n", "reclaim_counter", g_reclaim_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub data\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", g_stub_lookup_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", g_stub_mono_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", g_stub_poly_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_vtable_counter", g_stub_vtable_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", g_stub_space);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nlookup stub data\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "lookup_call_counter", g_call_lookup_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %d\r\n", "total stub dispatch calls", total_calls);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "mono stub data",
            100.0 * double(g_mono_call_counter)/double(total_calls));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "mono_call_counter", g_mono_call_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "mono_miss_counter", g_mono_miss_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
            100.0 * double(g_mono_miss_counter)/double(g_mono_call_counter));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "poly stub data",
            100.0 * double(g_poly_call_counter)/double(total_calls));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "poly_call_counter", g_poly_call_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "poly_miss_counter", g_poly_miss_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
            100.0 * double(g_poly_miss_counter)/double(g_poly_call_counter));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
#endif // STUB_LOGGING
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nchain lookup data\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_call_counter", g_chained_lookup_call_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_miss_counter", g_chained_lookup_miss_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
            100.0 * double(g_chained_lookup_miss_counter)/double(g_chained_lookup_call_counter));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_call_counter", g_chained_lookup_external_call_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_miss_counter", g_chained_lookup_external_miss_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
            100.0 * double(g_chained_lookup_external_miss_counter)/double(g_chained_lookup_external_call_counter));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
#endif // STUB_LOGGING
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_entry_promoted", g_chained_entry_promoted);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
#endif // CHAIN_LOOKUP
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "worker (slow resolver) data",
            100.0 * double(g_worker_call)/double(total_calls));
#else // !STUB_LOGGING
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nworker (slow resolver) data\r\n");
#endif // !STUB_LOGGING
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "worker_call", g_worker_call);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "worker_call_no_patch", g_worker_call_no_patch);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "external_call", g_external_call);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "external_call_no_patch", g_external_call_no_patch);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "worker_collide_to_mono", g_worker_collide_to_mono);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    UINT32 total_inserts = g_insert_cache_external
                           + g_insert_cache_shared
                           + g_insert_cache_dispatch
                           + g_insert_cache_resolve;

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %d\r\n", "insert cache data", total_inserts);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_external", g_insert_cache_external,
            100.0 * double(g_insert_cache_external)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_shared", g_insert_cache_shared,
            100.0 * double(g_insert_cache_shared)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_dispatch", g_insert_cache_dispatch,
            100.0 * double(g_insert_cache_dispatch)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_resolve", g_insert_cache_resolve,
            100.0 * double(g_insert_cache_resolve)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_hit", g_insert_cache_hit,
            100.0 * double(g_insert_cache_hit)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_miss", g_insert_cache_miss,
            100.0 * double(g_insert_cache_miss)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_collide", g_insert_cache_collide,
            100.0 * double(g_insert_cache_collide)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_write", g_insert_cache_write,
            100.0 * double(g_insert_cache_write)/double(total_inserts));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache data\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    g_resolveCache->GetLoadFactor(&total, &used);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_SIZE, "cache_entry_used", used);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", g_cache_entry_counter);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", g_cache_entry_space);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub hash table data\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space", g_bucket_space);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space_dead", g_bucket_space_dead);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache_load:\t%zu used, %zu total, utilization %#5.2f%%\r\n",
            used, total, 100.0 * double(used) / double(total));
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache entry write counts\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    DispatchCache::CacheEntryData *rgCacheData = g_resolveCache->cacheData;
    for (UINT16 i = 0; i < CALL_STUB_CACHE_SIZE; i++)
        sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), " %4d", rgCacheData[i]);
        WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n");
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
#endif // STUB_LOGGING
    for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
        if (ContractImplMap::deltasDescs[i] != 0)
            sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "deltasDescs[%d]\t%d\r\n", i, ContractImplMap::deltasDescs[i]);
            WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
        if (ContractImplMap::deltasSlots[i] != 0)
            sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "deltasSlots[%d]\t%d\r\n", i, ContractImplMap::deltasSlots[i]);
            WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);

    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "count of maps:\t%d\r\n", ContractImplMap::countMaps);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "count of interfaces:\t%d\r\n", ContractImplMap::countInterfaces);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "count of deltas:\t%d\r\n", ContractImplMap::countDelta);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "total delta for descs:\t%d\r\n", ContractImplMap::totalDeltaDescs);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
    sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "total delta for slots:\t%d\r\n", ContractImplMap::totalDeltaSlots);
    WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
void VirtualCallStubManager::FinishLogging()
    CloseHandle(g_hStubLogFile);

    g_hStubLogFile = NULL;
void VirtualCallStubManager::ResetCache()
    g_resolveCache->LogStats();

    g_insert_cache_external = 0;
    g_insert_cache_shared = 0;
    g_insert_cache_dispatch = 0;
    g_insert_cache_resolve = 0;
    g_insert_cache_hit = 0;
    g_insert_cache_miss = 0;
    g_insert_cache_collide = 0;
    g_insert_cache_write = 0;

    // Go through each cache entry and if the cache element there is in
    // the cache entry heap of the manager being deleted, then we just
    // set the cache entry to empty.
    DispatchCache::Iterator it(g_resolveCache);
void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderAllocator)
        PRECONDITION(CheckPointer(pDomain));
        INJECT_FAULT(COMPlusThrowOM(););

    // Record the parent domain
    parentDomain = pDomain;
    m_loaderAllocator = pLoaderAllocator;

    // Init critical sections
    m_indCellLock.Init(CrstVSDIndirectionCellLock, CRST_UNSAFE_ANYMODE);
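    // CRST_UNSAFE_ANYMODE: this lock may be acquired in either cooperative or
    // preemptive GC mode, so no GC mode transition is performed when taking it.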
    // Now allocate all BucketTables
    NewHolder<BucketTable> resolvers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
    NewHolder<BucketTable> dispatchers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS*2));
    NewHolder<BucketTable> lookups_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
    NewHolder<BucketTable> vtableCallers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
    NewHolder<BucketTable> cache_entries_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
    // Now allocate our LoaderHeaps

    // First do some calculation to determine how many pages we
    // will need to commit and reserve for each of our loader heaps

    DWORD indcell_heap_reserve_size;
    DWORD indcell_heap_commit_size;
    DWORD cache_entry_heap_reserve_size;
    DWORD cache_entry_heap_commit_size;

    // Set up an expected number of items to commit and reserve
    //
    // The commit number is not that important, as we always commit at least one page worth of items.
    // The reserve number should be high enough to cover a typical large application,
    // in order to minimize the fragmentation of our rangelists.

    indcell_heap_commit_size = 16; indcell_heap_reserve_size = 2000;
    cache_entry_heap_commit_size = 16; cache_entry_heap_reserve_size = 800;

    // Convert the number of items into a size in bytes to commit and reserve
    indcell_heap_reserve_size *= sizeof(void *);
    indcell_heap_commit_size *= sizeof(void *);

    cache_entry_heap_reserve_size *= sizeof(ResolveCacheElem);
    cache_entry_heap_commit_size *= sizeof(ResolveCacheElem);

    // Align up all of the commit and reserve sizes
    indcell_heap_reserve_size = (DWORD) ALIGN_UP(indcell_heap_reserve_size, GetOsPageSize());
    indcell_heap_commit_size = (DWORD) ALIGN_UP(indcell_heap_commit_size, GetOsPageSize());

    cache_entry_heap_reserve_size = (DWORD) ALIGN_UP(cache_entry_heap_reserve_size, GetOsPageSize());
    cache_entry_heap_commit_size = (DWORD) ALIGN_UP(cache_entry_heap_commit_size, GetOsPageSize());
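    // Worked example: on a 64-bit build with 4 KB pages, the 2000-item indcell
    // reserve is 2000 * sizeof(void *) = 16000 bytes, which ALIGN_UP rounds to
    // 16384 bytes (4 pages).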
    BYTE * initReservedMem = NULL;

    if (!m_loaderAllocator->IsCollectible())
        DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
                                          cache_entry_heap_reserve_size;

        DWORD dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSizeCalc, VIRTUAL_ALLOC_RESERVE_GRANULARITY);

        // If there's wasted reserved memory, we hand this out to the heaps to avoid waste.
        DWORD dwWastedReserveMemSize = dwTotalReserveMemSize - dwTotalReserveMemSizeCalc;
        if (dwWastedReserveMemSize != 0)
            DWORD cWastedPages = dwWastedReserveMemSize / GetOsPageSize();

            // Split the wasted pages over the 2 LoaderHeaps that we allocate as part of a VirtualCallStubManager
            DWORD cPagesPerHeap = cWastedPages / 2;
            DWORD cPagesRemainder = cWastedPages % 2; // We'll throw this at the cache entry heap

            indcell_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
            cache_entry_heap_reserve_size += (cPagesPerHeap + cPagesRemainder) * GetOsPageSize();
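            // E.g. with 3 wasted pages: cPagesPerHeap == 1 and cPagesRemainder == 1,
            // so the indcell heap grows by 1 page and the cache entry heap by 2.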
        CONSISTENCY_CHECK((indcell_heap_reserve_size +
                           cache_entry_heap_reserve_size) ==
                          dwTotalReserveMemSize);

        initReservedMem = (BYTE*)ExecutableAllocator::Instance()->Reserve(dwTotalReserveMemSize);

        m_initialReservedMemForHeaps = (BYTE *) initReservedMem;

        if (initReservedMem == NULL)

        indcell_heap_reserve_size = GetOsPageSize();
        indcell_heap_commit_size = GetOsPageSize();

        cache_entry_heap_reserve_size = GetOsPageSize();
        cache_entry_heap_commit_size = GetOsPageSize();

        DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
                                          cache_entry_heap_reserve_size;

        DWORD dwActualVSDSize = 0;

        initReservedMem = pLoaderAllocator->GetVSDHeapInitialBlock(&dwActualVSDSize);
        _ASSERTE(dwActualVSDSize == dwTotalReserveMemSizeCalc);

        m_initialReservedMemForHeaps = (BYTE *) initReservedMem;

        if (initReservedMem == NULL)
    // Hot memory, Writable, No-Execute, infrequent writes
    NewHolder<LoaderHeap> indcell_heap_holder(
        new LoaderHeap(indcell_heap_reserve_size, indcell_heap_commit_size,
                       initReservedMem, indcell_heap_reserve_size,
                       NULL, UnlockedLoaderHeap::HeapKind::Data));

    initReservedMem += indcell_heap_reserve_size;

    // Hot memory, Writable, No-Execute, infrequent writes
    NewHolder<LoaderHeap> cache_entry_heap_holder(
        new LoaderHeap(cache_entry_heap_reserve_size, cache_entry_heap_commit_size,
                       initReservedMem, cache_entry_heap_reserve_size,
                       &cache_entry_rangeList, UnlockedLoaderHeap::HeapKind::Data));

    initReservedMem += cache_entry_heap_reserve_size;

    // Warm memory, Writable, Execute, write exactly once
    NewHolder<CodeFragmentHeap> lookup_heap_holder(
        new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_LOOKUP_STUB));

    // Hot memory, Writable, Execute, write exactly once
    NewHolder<CodeFragmentHeap> dispatch_heap_holder(
        new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_DISPATCH_STUB));

    // Hot memory, Writable, Execute, write exactly once
    NewHolder<CodeFragmentHeap> resolve_heap_holder(
        new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_RESOLVE_STUB));

    // Hot memory, Writable, Execute, write exactly once
    NewHolder<CodeFragmentHeap> vtable_heap_holder(
        new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_VTABLE_STUB));

    // Allocate the initial counter block
    NewHolder<counter_block> m_counters_holder(new counter_block);
    // On success of every allocation, assign the objects and suppress the release
    indcell_heap = indcell_heap_holder;         indcell_heap_holder.SuppressRelease();
    lookup_heap = lookup_heap_holder;           lookup_heap_holder.SuppressRelease();
    dispatch_heap = dispatch_heap_holder;       dispatch_heap_holder.SuppressRelease();
    resolve_heap = resolve_heap_holder;         resolve_heap_holder.SuppressRelease();
    vtable_heap = vtable_heap_holder;           vtable_heap_holder.SuppressRelease();
    cache_entry_heap = cache_entry_heap_holder; cache_entry_heap_holder.SuppressRelease();

    resolvers = resolvers_holder;               resolvers_holder.SuppressRelease();
    dispatchers = dispatchers_holder;           dispatchers_holder.SuppressRelease();
    lookups = lookups_holder;                   lookups_holder.SuppressRelease();
    vtableCallers = vtableCallers_holder;       vtableCallers_holder.SuppressRelease();
    cache_entries = cache_entries_holder;       cache_entries_holder.SuppressRelease();

    m_counters = m_counters_holder;             m_counters_holder.SuppressRelease();

    // Create the initial failure counter block
    m_counters->next = NULL;
    m_counters->used = 0;
    m_cur_counter_block = m_counters;

    m_cur_counter_block_for_reclaim = m_counters;
    m_cur_counter_block_for_reclaim_index = 0;

    // Keep track of all of our managers
    VirtualCallStubManagerManager::GlobalManager()->AddStubManager(this);
void VirtualCallStubManager::Uninit()
    // Remove this manager from the set of managers we track
    VirtualCallStubManagerManager::GlobalManager()->RemoveStubManager(this);

VirtualCallStubManager::~VirtualCallStubManager()
    // Go through each cache entry and if the cache element there is in
    // the cache entry heap of the manager being deleted, then we just
    // set the cache entry to empty.
    DispatchCache::Iterator it(g_resolveCache);

    // Using UnlinkEntry performs an implicit call to Next (see comment for UnlinkEntry).
    // Thus, we need to avoid calling Next when we delete an entry so
    // that we don't accidentally skip entries.
    while (it.IsValid() && cache_entry_rangeList.IsInRange((TADDR)it.Entry()))
    if (indcell_heap)     { delete indcell_heap;     indcell_heap     = NULL; }
    if (lookup_heap)      { delete lookup_heap;      lookup_heap      = NULL; }
    if (dispatch_heap)    { delete dispatch_heap;    dispatch_heap    = NULL; }
    if (resolve_heap)     { delete resolve_heap;     resolve_heap     = NULL; }
    if (vtable_heap)      { delete vtable_heap;      vtable_heap      = NULL; }
    if (cache_entry_heap) { delete cache_entry_heap; cache_entry_heap = NULL; }

    if (resolvers)        { delete resolvers;        resolvers        = NULL; }
    if (dispatchers)      { delete dispatchers;      dispatchers      = NULL; }
    if (lookups)          { delete lookups;          lookups          = NULL; }
    if (vtableCallers)    { delete vtableCallers;    vtableCallers    = NULL; }
    if (cache_entries)    { delete cache_entries;    cache_entries    = NULL; }

    // Now get rid of the memory taken by the counter_blocks
    while (m_counters != NULL)
        counter_block *del = m_counters;
        m_counters = m_counters->next;

    // This was the block reserved by Init for the heaps.
    // For the collectible case, the VSD logic does not allocate the memory.
    if (m_initialReservedMemForHeaps && !m_loaderAllocator->IsCollectible())
        ClrVirtualFree (m_initialReservedMemForHeaps, 0, MEM_RELEASE);

    // Free critical section
    m_indCellLock.Destroy();
// Initialize static structures, and start up logging if necessary
void VirtualCallStubManager::InitStatic()
    STANDARD_VM_CONTRACT;

    // Note: if you change these values using environment variables then you must use hex values :-(
    STUB_MISS_COUNT_VALUE  = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubMissCount);
    STUB_COLLIDE_WRITE_PCT = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideWritePct);
    STUB_COLLIDE_MONO_PCT  = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideMonoPct);
    g_dumpLogCounter       = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogCounter);
    g_dumpLogIncr          = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogIncr);
    g_resetCacheCounter    = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheCounter);
    g_resetCacheIncr       = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheIncr);
#endif // STUB_LOGGING
#ifndef STUB_DISPATCH_PORTABLE
    DispatchHolder::InitializeStatic();
    ResolveHolder::InitializeStatic();
#endif // !STUB_DISPATCH_PORTABLE
    LookupHolder::InitializeStatic();

    g_resolveCache = new DispatchCache();

    if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_VirtualCallStubLogging))

    VirtualCallStubManagerManager::InitStatic();
// Static shutdown code.
// At the moment, this doesn't do anything more than log statistics.
void VirtualCallStubManager::UninitStatic()
    if (g_hStubLogFile != NULL)
        VirtualCallStubManagerIterator it =
            VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();

            it.Current()->LogStats();

        g_resolveCache->LogStats();
/* Reclaim/rearrange any structures that can only be done during a GC sync point,
   i.e. need to be serialized and non-concurrent. */
void VirtualCallStubManager::ReclaimAll()
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    /* @todo: if/when app domain unloading is supported,
       and when we have app domain specific stub heaps, we can complete the unloading
       of an app domain stub heap at this point, and make any patches to existing stubs that are
       not being unloaded so that they no longer refer to any of the unloaded app domain's code or types
    */

    //reclaim space of abandoned buckets
    BucketTable::Reclaim();

    VirtualCallStubManagerIterator it =
        VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();

        it.Current()->Reclaim();
/* Reclaim/rearrange any structures that can only be done during a GC sync point,
   i.e. need to be serialized and non-concurrent. */
void VirtualCallStubManager::Reclaim()
    LIMITED_METHOD_CONTRACT;

    UINT32 limit = min(counter_block::MAX_COUNTER_ENTRIES,
                       m_cur_counter_block_for_reclaim->used);
    limit = min(m_cur_counter_block_for_reclaim_index + 16, limit);
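    // Each pass replenishes every failure counter in the current 16-entry window
    // by (STUB_MISS_COUNT_VALUE/10)+1 (11 with the default of 100), slowing the
    // rate at which transient misses push a call site off its dispatch stub and
    // onto the resolve path.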
    for (UINT32 i = m_cur_counter_block_for_reclaim_index; i < limit; i++)
        m_cur_counter_block_for_reclaim->block[i] += (STUB_MISS_COUNT_VALUE/10)+1;

    // Increment the index by the number we processed
    m_cur_counter_block_for_reclaim_index = limit;

    // If we ran to the end of the block, go to the next
    if (m_cur_counter_block_for_reclaim_index == m_cur_counter_block_for_reclaim->used)
        m_cur_counter_block_for_reclaim = m_cur_counter_block_for_reclaim->next;
        m_cur_counter_block_for_reclaim_index = 0;

        // If this was the last block in the chain, go back to the beginning
        if (m_cur_counter_block_for_reclaim == NULL)
            m_cur_counter_block_for_reclaim = m_counters;
#endif // !DACCESS_COMPILE

//----------------------------------------------------------------------------
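// Maps a stub address back to its owning VirtualCallStubManager via the code
// RangeSection, optionally reporting the stub kind. Typical use:
//   StubCodeBlockKind stubKind = STUB_CODE_BLOCK_UNKNOWN;
//   VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(stub, &stubKind);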
VirtualCallStubManager *VirtualCallStubManager::FindStubManager(PCODE stubAddress, StubCodeBlockKind* wbStubKind)
    StubCodeBlockKind unusedStubKind;
    if (wbStubKind == NULL)
        wbStubKind = &unusedStubKind;

    *wbStubKind = STUB_CODE_BLOCK_UNKNOWN;

    RangeSection * pRS = ExecutionManager::FindCodeRange(stubAddress, ExecutionManager::ScanReaderLock);

    StubCodeBlockKind kind = pRS->_pjit->GetStubCodeBlockKind(pRS, stubAddress);
    case STUB_CODE_BLOCK_VSD_DISPATCH_STUB:
    case STUB_CODE_BLOCK_VSD_RESOLVE_STUB:
    case STUB_CODE_BLOCK_VSD_LOOKUP_STUB:
    case STUB_CODE_BLOCK_VSD_VTABLE_STUB:
        // This is a VSD stub; using the RangeSection, identify which LoaderAllocator this is from
        _ASSERTE(pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP);
        return pRS->_pHeapList->pLoaderAllocator->GetVirtualCallStubManager();
/* for use by debugger.
*/
BOOL VirtualCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    // Forwarded to from RangeSectionStubManager

/* for use by debugger.
*/
extern "C" void STDCALL StubDispatchFixupPatchLabel();
BOOL VirtualCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
    LIMITED_METHOD_CONTRACT;

    LOG((LF_CORDB, LL_EVERYTHING, "VirtualCallStubManager::DoTraceStub called\n"));

    // @workaround: Well, we really need the context to figure out where we're going, so
    // we'll do a TRACE_MGR_PUSH so that TraceManager gets called and we can use
    // the provided context to figure out where we're going.
    trace->InitForManagerPush(stubStartAddress, this);

//----------------------------------------------------------------------------
BOOL VirtualCallStubManager::TraceManager(Thread *thread,
                                          TraceDestination *trace,
    INJECT_FAULT(COMPlusThrowOM(););

    TADDR pStub = GetIP(pContext);

    // The return address should be on the top of the stack
    *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);

    // Get the token from the stub
    DispatchToken token(GetTokenFromStub(pStub));

    // Get the this object from ECX
    Object *pObj = StubManagerHelpers::GetThisPtr(pContext);

    // Call common trace code.
    return (TraceResolver(pObj, token, trace));
#ifndef DACCESS_COMPILE

PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, MethodDesc *pMD)
        PRECONDITION(CheckPointer(pMD));
        PRECONDITION(!pMD->IsInterface() || ownerType.GetMethodTable()->HasSameTypeDefAs(pMD->GetMethodTable()));
        INJECT_FAULT(COMPlusThrowOM(););

    return GetCallStub(ownerType, pMD->GetSlot());

//find or create a stub
PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, DWORD slot)
        INJECT_FAULT(COMPlusThrowOM(););
        POSTCONDITION(RETVAL != NULL);

    GCX_COOP(); // This is necessary for BucketTable synchronization

    MethodTable * pMT = ownerType.GetMethodTable();

    if (pMT->IsInterface())
        token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
    else
        token = DispatchToken::CreateDispatchToken(slot);

    //get a stub from lookups; create it if necessary
    PCODE stub = CALL_STUB_EMPTY_ENTRY;
    PCODE addrOfResolver = GetEEFuncEntryPoint(ResolveWorkerAsmStub);

    Prober probeL(&entryL);
    if (lookups->SetUpProber(token.To_SIZE_T(), 0, &probeL))
        if ((stub = (PCODE)(lookups->Find(&probeL))) == CALL_STUB_EMPTY_ENTRY)
            LookupHolder *pLookupHolder = GenerateLookupStub(addrOfResolver, token.To_SIZE_T());
            stub = (PCODE) (lookups->Add((size_t)(pLookupHolder->stub()->entryPoint()), &probeL));

    _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
    stats.site_counter++;
PCODE VirtualCallStubManager::GetVTableCallStub(DWORD slot)
        INJECT_FAULT(COMPlusThrowOM(););
        POSTCONDITION(RETVAL != NULL);

    GCX_COOP(); // This is necessary for BucketTable synchronization

    PCODE stub = CALL_STUB_EMPTY_ENTRY;

    VTableCallEntry entry;
    Prober probe(&entry);
    if (vtableCallers->SetUpProber(DispatchToken::CreateDispatchToken(slot).To_SIZE_T(), 0, &probe))
        if ((stub = (PCODE)(vtableCallers->Find(&probe))) == CALL_STUB_EMPTY_ENTRY)
            VTableCallHolder *pHolder = GenerateVTableCallStub(slot);
            stub = (PCODE)(vtableCallers->Add((size_t)(pHolder->stub()->entryPoint()), &probe));

    _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
VTableCallHolder* VirtualCallStubManager::GenerateVTableCallStub(DWORD slot)
    CONTRACT(VTableCallHolder*) {
        INJECT_FAULT(COMPlusThrowOM(););
        POSTCONDITION(RETVAL != NULL);

    //allocate from the requisite heap and copy the template over it.
    size_t vtableHolderSize = VTableCallHolder::GetHolderSize(slot);
    VTableCallHolder * pHolder = (VTableCallHolder*)(void*)vtable_heap->AllocAlignedMem(vtableHolderSize, CODE_SIZE_ALIGN);
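    // The stub heap is not written through directly: ExecutableWriterHolder maps a
    // writable view of pHolder (GetRW), the template is initialized through that
    // view, and the instruction cache is then flushed on the executable address.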
    ExecutableWriterHolder<VTableCallHolder> vtableWriterHolder(pHolder, vtableHolderSize);
    vtableWriterHolder.GetRW()->Initialize(slot);

    ClrFlushInstructionCache(pHolder->stub(), pHolder->stub()->size());

    stats.stub_vtable_counter++;
    stats.stub_space += (UINT32)pHolder->stub()->size();
    LOG((LF_STUBS, LL_INFO10000, "GenerateVTableCallStub for slot " FMT_ADDR "at" FMT_ADDR "\n",
        DBG_ADDR(slot), DBG_ADDR(pHolder->stub())));

#ifdef FEATURE_PERFMAP
    PerfMap::LogStubs(__FUNCTION__, "GenerateVTableCallStub", (PCODE)pHolder->stub(), pHolder->stub()->size());
//+----------------------------------------------------------------------------
//
//  Method:     VirtualCallStubManager::GenerateStubIndirection
//
//  Synopsis:   This method allocates an indirection cell for use by virtual stub dispatch (currently
//              only implemented for interface calls).
//              For normal methods: the indirection cell allocated will never be freed until app domain unload.
//              For dynamic methods: we recycle the indirection cells when a dynamic method is collected. To
//              do that we keep all the recycled indirection cells in a linked list: m_RecycledIndCellList. When
//              a dynamic method needs an indirection cell it allocates one from m_RecycledIndCellList. Each
//              dynamic method keeps track of all the indirection cells it uses and adds them back to
//              m_RecycledIndCellList when it is finalized.
//
//+----------------------------------------------------------------------------
BYTE *VirtualCallStubManager::GenerateStubIndirection(PCODE target, BOOL fUseRecycledCell /* = FALSE*/ )
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(target != NULL);
        POSTCONDITION(CheckPointer(RETVAL));

    _ASSERTE(isStubStatic(target));

    CrstHolder lh(&m_indCellLock);

    // The indirection cell to hold the pointer to the stub
    UINT32 cellsPerBlock = INDCELLS_PER_BLOCK;

    // First try the recycled indirection cell list for dynamic methods
    if (fUseRecycledCell)
        ret = GetOneRecycledIndCell();

    // Try the free indirection cell list
        ret = GetOneFreeIndCell();

    // Allocate from the loader heap
        // Free list is empty; allocate a block of indcells from indcell_heap and insert it into the free list.
        BYTE ** pBlock = (BYTE **) (void *) indcell_heap->AllocMem(S_SIZE_T(cellsPerBlock) * S_SIZE_T(sizeof(BYTE *)));

        // return the first cell in the block and add the rest to the free list
        ret = (BYTE *)pBlock;

        // link all the cells together
        // we don't need to null-terminate the linked list; InsertIntoFreeIndCellList will do it.
        for (UINT32 i = 1; i < cellsPerBlock - 1; ++i)
            pBlock[i] = (BYTE *)&(pBlock[i+1]);

        // insert the list into the free indcell list.
        InsertIntoFreeIndCellList((BYTE *)&pBlock[1], (BYTE*)&pBlock[cellsPerBlock - 1]);
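        // Resulting block layout (N == cellsPerBlock):
        //   pBlock[0]                 - returned to the caller; receives 'target' below
        //   pBlock[1] .. pBlock[N-2]  - each cell points to the next
        //   pBlock[N-1]               - tail; null-terminated by InsertIntoFreeIndCellList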
    *((PCODE *)ret) = target;

ResolveCacheElem *VirtualCallStubManager::GetResolveCacheElem(void *pMT,
        INJECT_FAULT(COMPlusThrowOM(););

    //get a cache entry elem, or make one if necessary
    ResolveCacheElem* elem = NULL;
    ResolveCacheEntry entryRC;
    Prober probeRC(&entryRC);
    if (cache_entries->SetUpProber(token, (size_t) pMT, &probeRC))
        elem = (ResolveCacheElem*) (cache_entries->Find(&probeRC));
        if (elem == CALL_STUB_EMPTY_ENTRY)
            bool reenteredCooperativeGCMode = false;
            elem = GenerateResolveCacheElem(target, pMT, token, &reenteredCooperativeGCMode);
            if (reenteredCooperativeGCMode)
                // The prober may have been invalidated by reentering cooperative GC mode; reset it
                BOOL success = cache_entries->SetUpProber(token, (size_t)pMT, &probeRC);

            elem = (ResolveCacheElem*) (cache_entries->Add((size_t) elem, &probeRC));

    _ASSERTE(elem && (elem != CALL_STUB_EMPTY_ENTRY));
#endif // !DACCESS_COMPILE

size_t VirtualCallStubManager::GetTokenFromStub(PCODE stub)
    _ASSERTE(stub != NULL);
    StubCodeBlockKind stubKind = STUB_CODE_BLOCK_UNKNOWN;
    VirtualCallStubManager * pMgr = FindStubManager(stub, &stubKind);

    return GetTokenFromStubQuick(pMgr, stub, stubKind);

size_t VirtualCallStubManager::GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubCodeBlockKind kind)
    _ASSERTE(pMgr != NULL);
    _ASSERTE(stub != NULL);
    _ASSERTE(kind != STUB_CODE_BLOCK_UNKNOWN);

#ifndef DACCESS_COMPILE

    if (kind == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
        _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_DISPATCH_STUB);
        DispatchStub * dispatchStub = (DispatchStub *) PCODEToPINSTR(stub);
        ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(dispatchStub->failTarget());
        _ASSERTE(isResolvingStubStatic(resolveHolder->stub()->resolveEntryPoint()));
        return resolveHolder->stub()->token();
    else if (kind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
        _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_RESOLVE_STUB);
        ResolveHolder * resolveHolder = ResolveHolder::FromResolveEntry(stub);
        return resolveHolder->stub()->token();
    else if (kind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB)
        _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_LOOKUP_STUB);
        LookupHolder * lookupHolder = LookupHolder::FromLookupEntry(stub);
        return lookupHolder->stub()->token();
    else if (kind == STUB_CODE_BLOCK_VSD_VTABLE_STUB)
        _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_VTABLE_STUB);
        VTableCallStub * vtableStub = (VTableCallStub *)PCODEToPINSTR(stub);
        return vtableStub->token();

    _ASSERTE(!"Should not get here.");

#else // DACCESS_COMPILE

#endif // DACCESS_COMPILE
#ifndef DACCESS_COMPILE
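// Moves a resolve cache element to the head of its hash chain so subsequent
// chained lookups find it sooner; g_chained_entry_promoted counts these promotions.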
ResolveCacheElem* __fastcall VirtualCallStubManager::PromoteChainEntry(ResolveCacheElem *pElem)
        PRECONDITION(CheckPointer(pElem));

    g_resolveCache->PromoteChainEntry(pElem);

#endif // CHAIN_LOOKUP
/* Resolve to a method and return its address, or NULL if there is none.
   Our return value is the target address that control should continue to. Our caller will
   enter the target address as if a direct call with the original stack frame had been made from
   the actual call site. Hence our strategy is to either return a target address
   of the actual method implementation, or the prestub if we cannot find the actual implementation.
   If we are returning a real method address, we may patch the original call site to point to a
   dispatching stub before returning. Note, if we encounter a method that hasn't been jitted
   yet, we will return the prestub, which should cause it to be jitted and we will
   be able to build the dispatching stub on a later call through the call site. If we encounter
   any other kind of problem, rather than throwing an exception, we will also return the
   prestub, unless we are unable to find the method at all, in which case we return NULL.
*/
PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
                        TADDR siteAddrForRegisterIndirect,
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(CheckPointer(pTransitionBlock));

    MAKE_CURRENT_THREAD_AVAILABLE();

    Thread::ObjectRefFlush(CURRENT_THREAD);

    FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
    StubDispatchFrame * pSDFrame = &frame;

    PCODE returnAddress = pSDFrame->GetUnadjustedReturnAddress();

    StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);

    OBJECTREF *protectedObj = pSDFrame->GetThisPtr();
    _ASSERTE(protectedObj != NULL);
    OBJECTREF pObj = *protectedObj;

    PCODE target = NULL;

        pSDFrame->SetForNullReferenceException();
        pSDFrame->Push(CURRENT_THREAD);
        INSTALL_MANAGED_EXCEPTION_DISPATCHER;
        INSTALL_UNWIND_AND_CONTINUE_HANDLER;
        COMPlusThrow(kNullReferenceException);
        UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
        UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
        _ASSERTE(!"Throw returned");
    if (flags & SDF_ResolvePromoteChain)
        ResolveCacheElem * pElem = (ResolveCacheElem *)token;
        g_resolveCache->PromoteChainEntry(pElem);
        target = (PCODE) pElem->target;

        // Have we failed the dispatch stub too many times?
        if (flags & SDF_ResolveBackPatch)
            PCODE stubAddr = callSite.GetSiteTarget();
            VirtualCallStubManager * pMgr = VirtualCallStubManager::FindStubManager(stubAddr);
            pMgr->BackPatchWorker(&callSite);

    pSDFrame->SetCallSite(NULL, (TADDR)callSite.GetIndirectCell());

    DispatchToken representativeToken(token);
    MethodTable * pRepresentativeMT = pObj->GetMethodTable();
    if (representativeToken.IsTypedToken())
        pRepresentativeMT = CURRENT_THREAD->GetDomain()->LookupType(representativeToken.GetTypeID());
        CONSISTENCY_CHECK(CheckPointer(pRepresentativeMT));

    pSDFrame->SetRepresentativeSlot(pRepresentativeMT, representativeToken.GetSlotNumber());
    pSDFrame->Push(CURRENT_THREAD);
    INSTALL_MANAGED_EXCEPTION_DISPATCHER;
    INSTALL_UNWIND_AND_CONTINUE_HANDLER;

    // For virtual delegates the m_siteAddr is a field of a managed object.
    // Thus we have to report it as an interior pointer,
    // so that it is updated during a GC.
    GCPROTECT_BEGININTERIOR( *(callSite.GetIndirectCellAddress()) );

    GCStress<vsd_on_resolve>::MaybeTriggerAndProtect(pObj);

    PCODE callSiteTarget = callSite.GetSiteTarget();
    CONSISTENCY_CHECK(callSiteTarget != NULL);

    StubCodeBlockKind stubKind = STUB_CODE_BLOCK_UNKNOWN;
    VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget, &stubKind);
    PREFIX_ASSUME(pMgr != NULL);

    // Have we failed the dispatch stub too many times?
    if (flags & SDF_ResolveBackPatch)
        pMgr->BackPatchWorker(&callSite);

    target = pMgr->ResolveWorker(&callSite, protectedObj, representativeToken, stubKind);

    if (pSDFrame->GetGCRefMap() != NULL)
        _ASSERTE(CheckGCRefMapEqual(pSDFrame->GetGCRefMap(), pSDFrame->GetFunction(), true));

    UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
    UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
    pSDFrame->Pop(CURRENT_THREAD);
void VirtualCallStubManager::BackPatchWorkerStatic(PCODE returnAddress, TADDR siteAddrForRegisterIndirect)
        PRECONDITION(returnAddress != NULL);

    StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);

    PCODE callSiteTarget = callSite.GetSiteTarget();
    CONSISTENCY_CHECK(callSiteTarget != NULL);

    VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget);
    PREFIX_ASSUME(pMgr != NULL);

    pMgr->BackPatchWorker(&callSite);

#if defined(TARGET_X86) && defined(TARGET_UNIX)
void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect)
    VirtualCallStubManager::BackPatchWorkerStatic(returnAddr, siteAddrForRegisterIndirect);
PCODE VirtualCallStubManager::ResolveWorker(StubCallSite* pCallSite,
                                            OBJECTREF *protectedObj,
                                            DispatchToken token,
                                            StubCodeBlockKind stubKind)
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(protectedObj != NULL);
        PRECONDITION(*protectedObj != NULL);
        PRECONDITION(IsProtectedByGCFrame(protectedObj));

    MethodTable* objectType = (*protectedObj)->GetMethodTable();
    CONSISTENCY_CHECK(CheckPointer(objectType));

    if (g_dumpLogCounter != 0)
        UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;

        if (total_calls > g_dumpLogCounter)
            VirtualCallStubManager::LoggingDump();
            if (g_dumpLogIncr == 0)
                g_dumpLogCounter = 0;
            else
                g_dumpLogCounter += g_dumpLogIncr;

    if (g_resetCacheCounter != 0)
        UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;

        if (total_calls > g_resetCacheCounter)
            VirtualCallStubManager::ResetCache();
            if (g_resetCacheIncr == 0)
                g_resetCacheCounter = 0;
            else
                g_resetCacheCounter += g_resetCacheIncr;
#endif // STUB_LOGGING
    //////////////////////////////////////////////////////////////
    // Get the managers associated with the callee

    VirtualCallStubManager *pCalleeMgr = NULL;  // Only set if the caller is shared, NULL otherwise

    BOOL bCallToShorterLivedTarget = FALSE;

    // We care about the following cases:
    // Call from any site -> collectible target
    if (objectType->GetLoaderAllocator()->IsCollectible())
        // The callee's manager
        pCalleeMgr = objectType->GetLoaderAllocator()->GetVirtualCallStubManager();
        if (pCalleeMgr != this)
            bCallToShorterLivedTarget = TRUE;

    stats.worker_call++;

    LOG((LF_STUBS, LL_INFO100000, "ResolveWorker from %sStub, token" FMT_ADDR "object's MT" FMT_ADDR "ind-cell" FMT_ADDR "call-site" FMT_ADDR "%s\n",
         (stubKind == STUB_CODE_BLOCK_VSD_DISPATCH_STUB) ? "Dispatch" : (stubKind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB) ? "Resolve" : (stubKind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB) ? "Lookup" : "Unknown",
         DBG_ADDR(token.To_SIZE_T()), DBG_ADDR(objectType), DBG_ADDR(pCallSite->GetIndirectCell()), DBG_ADDR(pCallSite->GetReturnAddress()),
         bCallToShorterLivedTarget ? "bCallToShorterLivedTarget" : "" ));
    PCODE stub = CALL_STUB_EMPTY_ENTRY;
    PCODE target = NULL;

    // This code can throw an OOM, but we do not want to fail in this case because
    // we must always successfully determine the target of a virtual call so that
    // CERs can work (there are a couple of exceptions to this involving generics).
    // Since the code below is just trying to see if a stub representing the current
    // type and token exists, it is not strictly necessary in determining the target.
    // We will treat the case of an OOM the same as the case of not finding an entry
    // in the hash tables and will continue on to the slow resolve case, which is
    // guaranteed not to fail outside of a couple of generics-specific cases.

        /////////////////////////////////////////////////////////////////////////////
        // First see if we can find a dispatcher stub for this token and type. If a
        // match is found, use the target stored in the entry.

        DispatchEntry entryD;
        Prober probeD(&entryD);
        if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
            stub = (PCODE) dispatchers->Find(&probeD);
            if (stub != CALL_STUB_EMPTY_ENTRY)
                target = (PCODE)entryD.Target();

        /////////////////////////////////////////////////////////////////////////////////////
        // Second see if we can find a ResolveCacheElem for this token and type.
        // If a match is found, use the target stored in the entry.

        ResolveCacheElem * elem = NULL;
        ResolveCacheEntry entryRC;
        Prober probeRC(&entryRC);
        if (cache_entries->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeRC))
            elem = (ResolveCacheElem *)(cache_entries->Find(&probeRC));
            if (elem != CALL_STUB_EMPTY_ENTRY)
                target = (PCODE)entryRC.Target();

    EX_END_CATCH (SwallowAllExceptions);
    /////////////////////////////////////////////////////////////////////////////////////
    // If we failed to find a target in either the resolver or cache entry hash tables,
    // we need to perform a full resolution of the token and type.
    //@TODO: Would be nice to add assertion code to ensure we only ever call Resolver once per <token,type>.

        CONSISTENCY_CHECK(stub == CALL_STUB_EMPTY_ENTRY);
        patch = Resolver(objectType, token, protectedObj, &target, TRUE /* throwOnConflict */);

        if (!objectType->IsComObjectType()
            && !objectType->IsICastable()
            && !objectType->IsIDynamicInterfaceCastable())
            CONSISTENCY_CHECK(!MethodTable::GetMethodDescForSlotAddress(target)->IsGenericMethodDefinition());

    CONSISTENCY_CHECK(target != NULL);
1582 // Now that we've successfully determined the target, we will wrap the remaining logic in a giant
1583 // TRY/CATCH statement because it is there purely to emit stubs and cache entries. In the event
1584 // that emitting stubs or cache entries throws an exception (for example, because of OOM), we should
1585 // not fail to perform the required dispatch. This is all because the basic assumption of
1586 // Constrained Execution Regions (CERs) is that all virtual method calls can be made without
1587 // failure.
1589 // NOTE: The THROWS contract for this method does not change, because there are still a few special
1590 // cases involving generics that can throw when trying to determine the target method. These cases
1591 // are exceptional and will be documented as unsupported for CERs.
1593 // NOTE: We do not try to keep track of the memory that has been allocated throughout this process
1594 // just so we can revert the memory should things fail. This is because the elements we add to the
1595 // hash tables can be reused later on. Additionally, the hash tables are unlocked so we could
1596 // never remove the elements anyway.
1599 // If we're the shared domain, we can't burn a dispatch stub to the target
1600 // if that target is outside the shared domain (through virtuals
1601 // originating in the shared domain but overridden by a non-shared type and
1602 // called on a collection, like HashTable would call GetHashCode on an
1603 // arbitrary object in its collection). Dispatch stubs would be hard to clean,
1604 // but resolve stubs are easy to clean because we just clean the cache.
1605 //@TODO: Figure out how to track these indirection cells so that in the
1606 //@TODO: future we can create dispatch stubs for this case.
1607 BOOL bCreateDispatchStub = !bCallToShorterLivedTarget;
1609 DispatchCache::InsertKind insertKind = DispatchCache::IK_NONE;
1615 // NOTE: This means that we are sharing dispatch stubs among callsites. If we decide we don't want
1616 // to do this in the future, just remove this condition
1617 if (stub == CALL_STUB_EMPTY_ENTRY)
1619 //we have a target but not the dispatcher stub, let's build it
1620 //First we need a failure target (the resolver stub)
1621 ResolveHolder *pResolveHolder = NULL;
1622 ResolveEntry entryR;
1623 Prober probeR(&entryR);
1624 PCODE pBackPatchFcn;
1625 PCODE pResolverFcn;
1627 #ifdef TARGET_X86
1628 // Only the x86 implementation needs a BackPatch function
1629 pBackPatchFcn = (PCODE) GetEEFuncEntryPoint(BackPatchWorkerAsmStub);
1630 #else // !TARGET_X86
1631 pBackPatchFcn = NULL;
1632 #endif // !TARGET_X86
1634 #ifdef CHAIN_LOOKUP
1635 pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerChainLookupAsmStub);
1636 #else // CHAIN_LOOKUP
1637 // Use the slow resolver
1638 pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerAsmStub);
1639 #endif // CHAIN_LOOKUP
1641 // First see if we've already created a resolve stub for this token
1642 if (resolvers->SetUpProber(token.To_SIZE_T(), 0, &probeR))
1644 // Find the right resolver, make it if necessary
1645 PCODE addrOfResolver = (PCODE)(resolvers->Find(&probeR));
1646 if (addrOfResolver == CALL_STUB_EMPTY_ENTRY)
1648 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
1649 MethodDesc* pMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, objectType);
1650 size_t stackArgumentsSize;
1652 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
1653 stackArgumentsSize = pMD->SizeOfArgStack();
1655 #endif // TARGET_X86 && !UNIX_X86_ABI
1657 pResolveHolder = GenerateResolveStub(pResolverFcn,
1660 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
1661 , stackArgumentsSize
1665 // Add the resolve entrypoint into the cache.
1666 //@TODO: Can we store a pointer to the holder rather than the entrypoint?
1667 resolvers->Add((size_t)(pResolveHolder->stub()->resolveEntryPoint()), &probeR);
1671 pResolveHolder = ResolveHolder::FromResolveEntry(addrOfResolver);
1673 CONSISTENCY_CHECK(CheckPointer(pResolveHolder));
1674 stub = pResolveHolder->stub()->resolveEntryPoint();
1675 CONSISTENCY_CHECK(stub != NULL);
1678 // Only create a dispatch stub if:
1679 // 1. We successfully created or found a resolve stub.
1680 // 2. We are not blocked from creating a dispatch stub.
1681 // 3. The call site is currently wired to a lookup stub. If the call site is wired
1682 // to anything else, then we're never going to use the dispatch stub so there's
1683 // no use in creating it.
1684 if (pResolveHolder != NULL && stubKind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB)
1686 DispatchEntry entryD;
1687 Prober probeD(&entryD);
1688 if (bCreateDispatchStub &&
1689 dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
1691 // We are allowed to create a reusable dispatch stub for all assemblies;
1692 // this allows us to optimize the call interception case the same way.
1693 DispatchHolder *pDispatchHolder = NULL;
1694 PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
1695 if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
1697 PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
1698 bool reenteredCooperativeGCMode = false;
1699 pDispatchHolder = GenerateDispatchStub(
1700 target, addrOfFail, objectType, token.To_SIZE_T(), &reenteredCooperativeGCMode);
1701 if (reenteredCooperativeGCMode)
1703 // The prober may have been invalidated by reentering cooperative GC mode, reset it
1704 BOOL success = dispatchers->SetUpProber(token.To_SIZE_T(), (size_t)objectType, &probeD);
1707 dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
1711 pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
1714 // Now assign the entrypoint to stub
1715 CONSISTENCY_CHECK(CheckPointer(pDispatchHolder));
1716 stub = pDispatchHolder->stub()->entryPoint();
1717 CONSISTENCY_CHECK(stub != NULL);
1721 insertKind = DispatchCache::IK_SHARED;
1728 stats.worker_call_no_patch++;
1732 // When we get here, target is the address to dispatch to,
1733 // and patch is TRUE, telling us that we may have to backpatch the call site with the stub
1734 if (stub != CALL_STUB_EMPTY_ENTRY)
1738 // If we get here and have a dispatching stub in hand, it probably means
1739 // that the cache used by the resolve stubs (g_resolveCache) does not have this stub yet.
1742 // We only insert into the cache if we have a ResolveStub or we have a DispatchStub
1743 // that missed, since we want to keep the resolve cache empty of unused entries.
1744 // If later the dispatch stub fails (because of another type at the call site),
1745 // we'll insert the new value into the cache for the next time.
1746 // Note that if we decide to skip creating a DispatchStub because we are calling
1747 // from a shared to an unshared domain, then we will also insert into the cache.
1749 if (insertKind == DispatchCache::IK_NONE)
1751 if (stubKind == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
1753 insertKind = DispatchCache::IK_DISPATCH;
1755 else if (stubKind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
1757 insertKind = DispatchCache::IK_RESOLVE;
1761 if (insertKind != DispatchCache::IK_NONE)
1763 VirtualCallStubManager * pMgrForCacheElem = this;
1765 // If we're calling from shared to unshared, make sure the cache element is
1766 // allocated in the unshared manager so that when the unshared code unloads
1767 // the cache element is unloaded.
1768 if (bCallToShorterLivedTarget)
1770 _ASSERTE(pCalleeMgr != NULL);
1771 pMgrForCacheElem = pCalleeMgr;
1774 // Find or create a new ResolveCacheElem
1775 ResolveCacheElem *e = pMgrForCacheElem->GetResolveCacheElem(objectType, token.To_SIZE_T(), (void *)target);
1777 // Try to insert this entry into the resolver cache table
1778 // When we get a collision we may decide not to insert this element
1779 // and Insert will return FALSE if we decided not to add the entry
1783 BOOL didInsert = g_resolveCache->Insert(e, insertKind);
1786 if ((STUB_COLLIDE_MONO_PCT > 0) && !didInsert && (stubKind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB))
1788 // If we decided not to perform the insert and we came in with a resolve stub
1789 // then we currently have a polymorphic callsite, So we flip a coin to decide
1790 // whether to convert this callsite back into a dispatch stub (monomorphic callsite)
1792 if (!bCallToShorterLivedTarget && bCreateDispatchStub)
1794 // We are allowed to create a reusable dispatch stub for all assemblies;
1795 // this allows us to optimize the call interception case the same way.
1797 UINT32 coin = UINT32(GetRandomInt(100));
1799 if (coin < STUB_COLLIDE_MONO_PCT)
1801 DispatchEntry entryD;
1802 Prober probeD(&entryD);
1803 if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
1805 DispatchHolder *pDispatchHolder = NULL;
1806 PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
1807 if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
1809 // It is possible that we never created this monomorphic dispatch stub
1810 // so we may have to create it now
1811 ResolveHolder* pResolveHolder = ResolveHolder::FromResolveEntry(pCallSite->GetSiteTarget());
1812 PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
1813 bool reenteredCooperativeGCMode = false;
1814 pDispatchHolder = GenerateDispatchStub(
1815 target, addrOfFail, objectType, token.To_SIZE_T(), &reenteredCooperativeGCMode);
1816 if (reenteredCooperativeGCMode)
1818 // The prober may have been invalidated by reentering cooperative GC mode, reset it
1819 BOOL success = dispatchers->SetUpProber(token.To_SIZE_T(), (size_t)objectType, &probeD);
1822 dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
1826 pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
1829 // increment the number of times we changed a cache collision into a mono stub
1830 stats.worker_collide_to_mono++;
1832 // Now assign the entrypoint to stub
1833 CONSISTENCY_CHECK(pDispatchHolder != NULL);
1834 stub = pDispatchHolder->stub()->entryPoint();
1835 CONSISTENCY_CHECK(stub != NULL);
1840 #endif // STUB_LOGGING
1843 if (stubKind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB)
1845 BackPatchSite(pCallSite, (PCODE)stub);
1852 EX_END_CATCH (SwallowAllExceptions);
1854 // Target can be NULL only if we can't resolve to an address
1855 _ASSERTE(target != NULL);
1861 Resolve the token in the context of the method table, and set the target to point to
1862 the address we should go to in order to reach the implementation. Return a boolean indicating
1863 whether this is a permanent choice or a temporary choice. For example, if the code has
1864 not been jitted yet, return FALSE and set the target to the prestub. If the target is set to NULL,
1865 it means that the token is not resolvable.
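An illustrative call pattern (a sketch; it mirrors the use in GetTarget below, and
the variable names are only for exposition):

    PCODE target = NULL;
    BOOL fPatch = Resolver(pMT, token, NULL /* protectedObj */, &target, TRUE /* throwOnConflict */);
    // fPatch == TRUE  means 'target' is stable enough to cache or backpatch;
    // fPatch == FALSE means 'target' is temporary (for example, the prestub).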
1868 VirtualCallStubManager::Resolver(
1870 DispatchToken token,
1871 OBJECTREF * protectedObj, // this one can actually be NULL, consider using pMT if you don't need the object itself
1873 BOOL throwOnConflict)
1878 PRECONDITION(CheckPointer(pMT));
1879 PRECONDITION(TypeHandle(pMT).CheckFullyLoaded());
1883 MethodTable * dbg_pTokenMT = pMT;
1884 MethodDesc * dbg_pTokenMD = NULL;
1885 if (token.IsTypedToken())
1887 dbg_pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
1888 dbg_pTokenMD = dbg_pTokenMT->FindDispatchSlot(TYPE_ID_THIS_CLASS, token.GetSlotNumber(), throwOnConflict).GetMethodDesc();
1892 // NOTE: CERs are not hardened against transparent proxy types,
1893 // so no need to worry about throwing an exception from here.
1895 LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (start) looking up %s method in %s\n",
1896 token.IsThisToken() ? "this" : "interface",
1897 pMT->GetClass()->GetDebugClassName()));
1899 MethodDesc * pMD = NULL;
1900 BOOL fShouldPatch = FALSE;
1901 DispatchSlot implSlot(pMT->FindDispatchSlot(token.GetTypeID(), token.GetSlotNumber(), throwOnConflict));
1903 // If we found a target, then just figure out if we're allowed to create a stub around
1904 // this target and backpatch the callsite.
1905 if (!implSlot.IsNull())
1907 #if defined(LOGGING) || defined(_DEBUG)
1909 pMD = implSlot.GetMethodDesc();
1912 // Make sure we aren't crossing app domain boundaries
1913 CONSISTENCY_CHECK(GetAppDomain()->CheckValidModule(pMD->GetModule()));
1915 WORD slot = pMD->GetSlot();
1916 BOOL fIsOverriddenMethod =
1917 (pMT->GetNumParentVirtuals() <= slot && slot < pMT->GetNumVirtuals());
1918 LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (end) looked up %s %s method %s::%s\n",
1919 fIsOverriddenMethod ? "overridden" : "newslot",
1920 token.IsThisToken() ? "this" : "interface",
1921 pMT->GetClass()->GetDebugClassName(),
1926 #endif // defined(LOGGING) || defined(_DEBUG)
1928 BOOL fSlotCallsPrestub = DoesSlotCallPrestub(implSlot.GetTarget());
1929 if (!fSlotCallsPrestub)
1931 // Skip fixup precode jump for better perf
1932 PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(implSlot.GetTarget());
1933 if (pDirectTarget != NULL)
1934 implSlot = DispatchSlot(pDirectTarget);
1936 // Only patch to a target if it's not going to call the prestub.
1937 fShouldPatch = TRUE;
1941 // Getting the MethodDesc is very expensive,
1942 // so only call this when we are calling the prestub
1943 pMD = implSlot.GetMethodDesc();
1947 // pMD can be NULL when another thread raced in and patched the Method Entry Point
1948 // so that it no longer points at the prestub
1949 // In such a case DoesSlotCallPrestub will now return FALSE
1950 CONSISTENCY_CHECK(!DoesSlotCallPrestub(implSlot.GetTarget()));
1951 fSlotCallsPrestub = FALSE;
1954 if (!fSlotCallsPrestub)
1956 // Only patch to a target if it's not going to call the prestub.
1957 fShouldPatch = TRUE;
1961 CONSISTENCY_CHECK(CheckPointer(pMD));
1962 if (pMD->IsGenericMethodDefinition())
1964 //@GENERICS: Currently, generic virtual methods are called only through JIT_VirtualFunctionPointer
1965 // and so we could never have a virtual call stub at a call site for a generic virtual.
1966 // As such, we're assuming the only callers to Resolver are calls to GetTarget caused
1967 // indirectly by JIT_VirtualFunctionPointer. So, we return TRUE for patching so that
1968 // we can cache the result in GetTarget and we don't have to perform the full resolve
1969 // every time. If the way we call generic virtual methods changes, this will also need
1970 // to change.
1971 fShouldPatch = TRUE;
1976 #ifdef FEATURE_COMINTEROP
1977 else if (pMT->IsComObjectType() && IsInterfaceToken(token))
1979 MethodTable * pItfMT = GetTypeFromToken(token);
1980 implSlot = pItfMT->FindDispatchSlot(TYPE_ID_THIS_CLASS, token.GetSlotNumber(), throwOnConflict);
1982 _ASSERTE(!pItfMT->HasInstantiation());
1984 fShouldPatch = TRUE;
1986 #endif // FEATURE_COMINTEROP
1987 #ifdef FEATURE_ICASTABLE
1988 else if (pMT->IsICastable() && protectedObj != NULL && *protectedObj != NULL)
1990 GCStress<cfg_any>::MaybeTrigger();
1992 // In the case of ICastable, instead of trying to find the method implementation in the real object type,
1993 // we call Resolver() again with whatever type it returns.
1994 // This allows objects that implement ICastable to mimic the behavior of other types.
1995 MethodTable * pTokenMT = GetTypeFromToken(token);
1997 // Make call to ICastableHelpers.GetImplType(this, interfaceTypeObj)
1998 PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICASTABLEHELPERS__GETIMPLTYPE);
2000 OBJECTREF tokenManagedType = pTokenMT->GetManagedClassObject(); //GC triggers
2002 DECLARE_ARGHOLDER_ARRAY(args, 2);
2003 args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*protectedObj);
2004 args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(tokenManagedType);
2006 OBJECTREF impTypeObj = NULL;
2007 CALL_MANAGED_METHOD_RETREF(impTypeObj, OBJECTREF, args);
2009 INDEBUG(tokenManagedType = NULL); //tokenManagedType wasn't protected during the call
2010 if (impTypeObj == NULL) // GetImplType returns default(RuntimeTypeHandle)
2012 COMPlusThrow(kEntryPointNotFoundException);
2015 ReflectClassBaseObject* resultTypeObj = ((ReflectClassBaseObject*)OBJECTREFToObject(impTypeObj));
2016 TypeHandle resultTypeHnd = resultTypeObj->GetType();
2017 MethodTable *pResultMT = resultTypeHnd.GetMethodTable();
2019 return Resolver(pResultMT, token, protectedObj, ppTarget, throwOnConflict);
2021 #endif // FEATURE_ICASTABLE
2022 else if (pMT->IsIDynamicInterfaceCastable()
2023 && protectedObj != NULL
2024 && *protectedObj != NULL
2025 && IsInterfaceToken(token))
2027 MethodTable *pTokenMT = GetTypeFromToken(token);
2029 OBJECTREF implTypeRef = DynamicInterfaceCastable::GetInterfaceImplementation(protectedObj, TypeHandle(pTokenMT));
2030 _ASSERTE(implTypeRef != NULL);
2032 ReflectClassBaseObject *implTypeObj = ((ReflectClassBaseObject *)OBJECTREFToObject(implTypeRef));
2033 TypeHandle implTypeHandle = implTypeObj->GetType();
2034 return Resolver(implTypeHandle.GetMethodTable(), token, protectedObj, ppTarget, throwOnConflict);
2037 if (implSlot.IsNull())
2039 MethodTable * pTokenMT = NULL;
2040 MethodDesc * pTokenMD = NULL;
2041 if (token.IsTypedToken())
2043 pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2044 pTokenMD = pTokenMT->FindDispatchSlot(TYPE_ID_THIS_CLASS, token.GetSlotNumber(), throwOnConflict).GetMethodDesc();
2047 #ifdef FEATURE_COMINTEROP
2048 if ((pTokenMT != NULL) && (pTokenMT->GetClass()->IsEquivalentType()))
2051 DefineFullyQualifiedNameForClassW();
2052 pTokenMD->GetFullMethodInfo(methodName);
2054 COMPlusThrowHR(COR_E_MISSINGMETHOD, COR_E_MISSINGMETHOD, GetFullyQualifiedNameForClassNestedAwareW(pMT), methodName.GetUnicode());
2057 #endif // FEATURE_COMINTEROP
2058 if (!throwOnConflict)
2060 // Assume we got null because there was a default interface method conflict
2066 // Method not found. In the castable object scenario where the method is being resolved on an interface itself,
2067 // this can happen if the user tried to call a method without a default implementation. Outside of that case,
2068 // this should never happen for anything but equivalent types
2069 CONSISTENCY_CHECK((!implSlot.IsNull() || pMT->IsInterface()) && "Valid method implementation was not found.");
2070 COMPlusThrow(kEntryPointNotFoundException);
2074 *ppTarget = implSlot.GetTarget();
2076 return fShouldPatch;
2077 } // VirtualCallStubManager::Resolver
2079 #endif // !DACCESS_COMPILE
2081 //----------------------------------------------------------------------------
2082 // Given a token, return true if the token represents a slot on the target.
2083 BOOL VirtualCallStubManager::IsClassToken(DispatchToken token)
2089 RETURN (token.IsThisToken());
2092 //----------------------------------------------------------------------------
2093 // Given a token, return true if the token represents an interface, false if just a slot.
2094 BOOL VirtualCallStubManager::IsInterfaceToken(DispatchToken token)
2100 BOOL ret = token.IsTypedToken();
2101 // For now, only interfaces have typed dispatch tokens.
2102 CONSISTENCY_CHECK(!ret || CheckPointer(GetThread()->GetDomain()->LookupType(token.GetTypeID())));
2103 CONSISTENCY_CHECK(!ret || GetThread()->GetDomain()->LookupType(token.GetTypeID())->IsInterface());
2107 #ifndef DACCESS_COMPILE
2109 //----------------------------------------------------------------------------
2111 VirtualCallStubManager::GetRepresentativeMethodDescFromToken(
2112 DispatchToken token,
2115 CONTRACT (MethodDesc *) {
2119 PRECONDITION(CheckPointer(pMT));
2120 POSTCONDITION(CheckPointer(RETVAL));
2123 // This is called when trying to create a HelperMethodFrame, which means there are
2124 // potentially managed references on the stack that are not yet protected.
2127 if (token.IsTypedToken())
2129 pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2130 CONSISTENCY_CHECK(CheckPointer(pMT));
2131 token = DispatchToken::CreateDispatchToken(token.GetSlotNumber());
2133 CONSISTENCY_CHECK(token.IsThisToken());
2134 RETURN (pMT->GetMethodDescForSlot(token.GetSlotNumber()));
2137 //----------------------------------------------------------------------------
2138 MethodTable *VirtualCallStubManager::GetTypeFromToken(DispatchToken token)
2142 WRAPPER(GC_TRIGGERS);
2144 MethodTable *pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2145 _ASSERTE(pMT != NULL);
2146 _ASSERTE(pMT->LookupTypeID() == token.GetTypeID());
2150 #endif // !DACCESS_COMPILE
2152 //----------------------------------------------------------------------------
2153 MethodDesc *VirtualCallStubManager::GetInterfaceMethodDescFromToken(DispatchToken token)
2157 WRAPPER(GC_TRIGGERS);
2158 PRECONDITION(IsInterfaceToken(token));
2161 #ifndef DACCESS_COMPILE
2163 MethodTable * pMT = GetTypeFromToken(token);
2164 PREFIX_ASSUME(pMT != NULL);
2165 CONSISTENCY_CHECK(CheckPointer(pMT));
2166 return pMT->GetMethodDescForSlot(token.GetSlotNumber());
2168 #else // DACCESS_COMPILE
2173 #endif // DACCESS_COMPILE
2176 #ifndef DACCESS_COMPILE
2178 //----------------------------------------------------------------------------
2179 // This will check to see if a match is in the cache.
2180 // Returns the target on success, otherwise NULL.
2181 PCODE VirtualCallStubManager::CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT)
2186 PRECONDITION(CheckPointer(pMT));
2189 // Now look in the cache for a match
2190 ResolveCacheElem *pElem = g_resolveCache->Lookup(token, tokenHash, pMT);
2192 // If the element matches, return the target - we're done!
2193 return (PCODE)(pElem != NULL ? pElem->target : NULL);
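// Illustrative use (a sketch; it mirrors the fast path of GetTarget below):
//
//   PCODE target = CacheLookup(token.To_SIZE_T(), DispatchCache::INVALID_HASH, pMT);
//   if (target == NULL)
//   {
//       // no cached entry for this <token, MT> pair; fall back to the full Resolver path
//   }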
2197 //----------------------------------------------------------------------------
2200 VirtualCallStubManager::GetTarget(
2201 DispatchToken token,
2203 BOOL throwOnConflict)
2208 INJECT_FAULT(COMPlusThrowOM(););
2209 PRECONDITION(CheckPointer(pMT));
2214 if (token.IsThisToken())
2216 return pMT->GetRestoredSlot(token.GetSlotNumber());
2219 GCX_COOP(); // This is necessary for BucketTable synchronization
2221 PCODE target = NULL;
2223 #ifndef STUB_DISPATCH_PORTABLE
2224 target = CacheLookup(token.To_SIZE_T(), DispatchCache::INVALID_HASH, pMT);
2227 #endif // !STUB_DISPATCH_PORTABLE
2229 // No match, now do full resolve
2232 // TODO: passing NULL as protectedObj here can lead to incorrect behavior for ICastable objects
2233 // We need to review if this is the case and refactor this code if we want ICastable to become officially supported
2234 fPatch = Resolver(pMT, token, NULL, &target, throwOnConflict);
2235 _ASSERTE(!throwOnConflict || target != NULL);
2237 #ifndef STUB_DISPATCH_PORTABLE
2240 ResolveCacheElem *pCacheElem = pMT->GetLoaderAllocator()->GetVirtualCallStubManager()->
2241 GetResolveCacheElem(pMT, token.To_SIZE_T(), (BYTE *)target);
2245 if (!g_resolveCache->Insert(pCacheElem, DispatchCache::IK_EXTERNAL))
2247 // We decided not to perform the insert
2253 g_external_call_no_patch++;
2255 #endif // !STUB_DISPATCH_PORTABLE
2260 #endif // !DACCESS_COMPILE
2262 //----------------------------------------------------------------------------
2264 For use by the debugger: resolve the token in the context of the object's method
2265 table and set up a trace to the address that implements it. Returns FALSE if the
2266 call cannot be traced, for example when stepping into a stub dispatch call on a
2267 null object, or when the dispatch slot cannot be found (e.g. a default interface
2268 method conflict).
2271 VirtualCallStubManager::TraceResolver(
2273 DispatchToken token,
2274 TraceDestination * trace)
2279 PRECONDITION(CheckPointer(pObj, NULL_OK));
2280 INJECT_FAULT(COMPlusThrowOM(););
2283 // If someone is trying to step into a stub dispatch call on a null object,
2284 // just say that we can't trace this call and we'll just end up throwing
2285 // a null ref exception.
2291 MethodTable *pMT = pObj->GetMethodTable();
2292 CONSISTENCY_CHECK(CheckPointer(pMT));
2294 DispatchSlot slot(pMT->FindDispatchSlot(token.GetTypeID(), token.GetSlotNumber(), FALSE /* throwOnConflict */));
2295 if (slot.IsNull() && IsInterfaceToken(token) && pMT->IsComObjectType())
2297 MethodDesc * pItfMD = GetInterfaceMethodDescFromToken(token);
2298 CONSISTENCY_CHECK(pItfMD->GetMethodTable()->GetSlot(pItfMD->GetSlot()) == pItfMD->GetMethodEntryPoint());
2300 // Look up the slot on the interface itself.
2301 slot = pItfMD->GetMethodTable()->FindDispatchSlot(TYPE_ID_THIS_CLASS, pItfMD->GetSlot(), FALSE /* throwOnConflict */);
2304 // The dispatch slot's target may change due to code versioning shortly after it was retrieved above for the trace. This
2305 // will result in the debugger getting some version of the code or the prestub, but not necessarily the exact code pointer
2306 // that winds up getting executed. The debugger has code that handles this ambiguity by placing a breakpoint at the start of
2307 // all native code versions, even if they aren't the one that was reported by this trace, see
2308 // DebuggerController::PatchTrace() under case TRACE_MANAGED. This alleviates the StubManager from having to prevent the
2309 // race that occurs here.
2311 // If the dispatch slot is null, we assume it's because of a diamond case in default interface method dispatch.
2312 return slot.IsNull() ? FALSE : (StubManager::TraceStub(slot.GetTarget(), trace));
2315 #ifndef DACCESS_COMPILE
2317 //----------------------------------------------------------------------------
2318 /* Change the call site. It is failing the expected MT test in the dispatcher stub
2319 too often, so patch it to point at the resolve stub instead. */
2321 void VirtualCallStubManager::BackPatchWorker(StubCallSite* pCallSite)
2329 PCODE callSiteTarget = pCallSite->GetSiteTarget();
2331 if (isDispatchingStubStatic(callSiteTarget))
2333 DispatchHolder * dispatchHolder = DispatchHolder::FromDispatchEntry(callSiteTarget);
2334 DispatchStub * dispatchStub = dispatchHolder->stub();
2336 //yes, patch it to point to the resolve stub
2337 //We can ignore the races now since we know that the call site does go through our
2338 //stub mechanisms, hence no matter who wins the race, we are correct.
2339 //We find the correct resolve stub by following the failure path in the dispatcher stub itself
2340 PCODE failEntry = dispatchStub->failTarget();
2341 ResolveStub* resolveStub = ResolveHolder::FromFailEntry(failEntry)->stub();
2342 PCODE resolveEntry = resolveStub->resolveEntryPoint();
2343 BackPatchSite(pCallSite, resolveEntry);
2345 LOG((LF_STUBS, LL_INFO10000, "BackPatchWorker call-site" FMT_ADDR "dispatchStub" FMT_ADDR "\n",
2346 DBG_ADDR(pCallSite->GetReturnAddress()), DBG_ADDR(dispatchHolder->stub())));
2348 //Add back the default miss count to the counter being used by this resolve stub.
2349 //Since resolve stubs are shared among many dispatch stubs, each dispatch stub
2350 //that fails decrements the shared counter, and the dispatch stub that trips the
2351 //counter gets converted into a polymorphic site.
2352 INT32* counter = resolveStub->pCounter();
2353 *counter += STUB_MISS_COUNT_VALUE;
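// A minimal sketch of the miss-counter protocol described above (illustrative
// pseudocode; the real fail path lives in the generated stub code and, on x86,
// reaches this worker via BackPatchWorkerAsmStub):
//
//   // in the resolve stub's fail path, on each dispatch-stub miss:
//   if (--(*pCounter) < 0)
//       goto backpatch_worker;         // convert this call site to polymorphic
//
//   // here in BackPatchWorker, after converting one call site, the shared
//   // budget is replenished so the remaining dispatch stubs get a fresh allowance:
//   *counter += STUB_MISS_COUNT_VALUE;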
2357 //----------------------------------------------------------------------------
2358 /* consider changing the call site to point to stub, if appropriate do it
2360 void VirtualCallStubManager::BackPatchSite(StubCallSite* pCallSite, PCODE stub)
2366 PRECONDITION(stub != NULL);
2367 PRECONDITION(CheckPointer(pCallSite));
2368 PRECONDITION(pCallSite->GetSiteTarget() != NULL);
2373 // This will take care of the prejit case and find the actual patch site
2374 PCODE prior = pCallSite->GetSiteTarget();
2376 //is this really going to change anything? If not, don't do it.
2380 //we only want to do the following transitions for right now:
2381 //  from            to
2382 //  lookup          dispatching or resolving
2383 //  dispatching     resolving
2384 if (isResolvingStubStatic(prior))
2387 if(isDispatchingStubStatic(stub))
2389 if(isDispatchingStubStatic(prior))
2395 stats.site_write_mono++;
2400 stats.site_write_poly++;
2403 //patch the call site
2404 pCallSite->SetSiteTarget(patch);
2409 //----------------------------------------------------------------------------
2410 void StubCallSite::SetSiteTarget(PCODE newTarget)
2412 WRAPPER_NO_CONTRACT;
2413 PTR_PCODE pCell = GetIndirectCell();
2414 *pCell = newTarget;
2417 //----------------------------------------------------------------------------
2418 /* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
2419 are the addresses the stub is to transfer to depending on the test with pMTExpected
2421 DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE addrOfCode,
2424 size_t dispatchToken,
2425 bool * pMayHaveReenteredCooperativeGCMode)
2427 CONTRACT (DispatchHolder*) {
2430 INJECT_FAULT(COMPlusThrowOM(););
2431 PRECONDITION(addrOfCode != NULL);
2432 PRECONDITION(addrOfFail != NULL);
2433 PRECONDITION(CheckPointer(pMTExpected));
2434 PRECONDITION(pMayHaveReenteredCooperativeGCMode != nullptr);
2435 PRECONDITION(!*pMayHaveReenteredCooperativeGCMode);
2436 POSTCONDITION(CheckPointer(RETVAL));
2439 size_t dispatchHolderSize = sizeof(DispatchHolder);
2442 // See comment around m_fShouldAllocateLongJumpDispatchStubs for explanation.
2443 if (m_fShouldAllocateLongJumpDispatchStubs
2444 INDEBUG(|| g_pConfig->ShouldGenerateLongJumpDispatchStub()))
2446 RETURN GenerateDispatchStubLong(addrOfCode,
2450 pMayHaveReenteredCooperativeGCMode);
2453 dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT);
2456 //allocate from the requisite heap and copy the template over it.
2457 DispatchHolder * holder = (DispatchHolder*) (void*)
2458 dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
2461 if (!DispatchHolder::CanShortJumpDispatchStubReachFailTarget(addrOfFail, (LPCBYTE)holder))
2463 m_fShouldAllocateLongJumpDispatchStubs = TRUE;
2464 RETURN GenerateDispatchStub(addrOfCode, addrOfFail, pMTExpected, dispatchToken, pMayHaveReenteredCooperativeGCMode);
2468 ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, dispatchHolderSize);
2469 dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
2473 , DispatchStub::e_TYPE_SHORT
2477 #ifdef FEATURE_CODE_VERSIONING
2478 MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(addrOfCode);
2479 if (pMD->IsVersionableWithVtableSlotBackpatch())
2481 EntryPointSlots::SlotType slotType;
2482 TADDR slot = holder->stub()->implTargetSlot(&slotType);
2483 pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
2485 // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
2486 *pMayHaveReenteredCooperativeGCMode = true;
2490 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2493 stats.stub_mono_counter++;
2494 stats.stub_space += (UINT32)dispatchHolderSize;
2495 LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
2496 DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
2498 #ifdef FEATURE_PERFMAP
2499 PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
2506 //----------------------------------------------------------------------------
2507 /* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
2508 are the addresses the stub is to transfer to depending on the test with pMTExpected
2510 DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE addrOfCode,
2513 size_t dispatchToken,
2514 bool * pMayHaveReenteredCooperativeGCMode)
2516 CONTRACT (DispatchHolder*) {
2519 INJECT_FAULT(COMPlusThrowOM(););
2520 PRECONDITION(addrOfCode != NULL);
2521 PRECONDITION(addrOfFail != NULL);
2522 PRECONDITION(CheckPointer(pMTExpected));
2523 PRECONDITION(pMayHaveReenteredCooperativeGCMode != nullptr);
2524 PRECONDITION(!*pMayHaveReenteredCooperativeGCMode);
2525 POSTCONDITION(CheckPointer(RETVAL));
2528 //allocate from the requisite heap and copy the template over it.
2529 size_t dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG);
2530 DispatchHolder * holder = (DispatchHolder*) (void*)dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
2531 ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, dispatchHolderSize);
2533 dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
2535 (size_t)pMTExpected,
2536 DispatchStub::e_TYPE_LONG);
2538 #ifdef FEATURE_CODE_VERSIONING
2539 MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(addrOfCode);
2540 if (pMD->IsVersionableWithVtableSlotBackpatch())
2542 EntryPointSlots::SlotType slotType;
2543 TADDR slot = holder->stub()->implTargetSlot(&slotType);
2544 pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
2546 // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
2547 *pMayHaveReenteredCooperativeGCMode = true;
2551 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2554 stats.stub_mono_counter++;
2555 stats.stub_space += static_cast<UINT32>(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG));
2556 LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
2557 DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
2559 #ifdef FEATURE_PERFMAP
2560 PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
2567 //----------------------------------------------------------------------------
2568 /* Generate a resolve stub for the given dispatchToken.
2569 addrOfResolver is where to go if the inline cache check misses
2570 addrOfPatcher is who to call if the fail piece is being called too often by dispatcher stubs
2572 ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE addrOfResolver,
2573 PCODE addrOfPatcher,
2574 size_t dispatchToken
2575 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
2576 , size_t stackArgumentsSize
2580 CONTRACT (ResolveHolder*) {
2583 INJECT_FAULT(COMPlusThrowOM(););
2584 PRECONDITION(addrOfResolver != NULL);
2585 #if defined(TARGET_X86)
2586 PRECONDITION(addrOfPatcher != NULL);
2588 POSTCONDITION(CheckPointer(RETVAL));
2591 _ASSERTE(addrOfResolver);
2593 //get a counter for the fail piece
2595 UINT32 counter_index = counter_block::MAX_COUNTER_ENTRIES;
2596 counter_block *cur_block = NULL;
2600 cur_block = VolatileLoad(&m_cur_counter_block);
2602 if ((cur_block != NULL) && (cur_block->used < counter_block::MAX_COUNTER_ENTRIES))
2604 counter_index = InterlockedIncrement((LONG*)&cur_block->used) - 1;
2605 if (counter_index < counter_block::MAX_COUNTER_ENTRIES)
2607 // Typical case we allocate the next free counter in the block
2612 // Otherwise we have to create a new counter_block to serve as the head of m_cur_counter_block list
2614 // Create the new block in the main heap
2615 counter_block *pNew = new counter_block;
2617 // Initialize the new block
2618 pNew->next = cur_block;
2621 // Try to link in the new block
2622 if (InterlockedCompareExchangeT(&m_cur_counter_block, pNew, cur_block) != cur_block)
2624 // Lost a race to add pNew as new head
2629 CONSISTENCY_CHECK(counter_index < counter_block::MAX_COUNTER_ENTRIES);
2630 CONSISTENCY_CHECK(CheckPointer(cur_block));
2632 // Initialize the default miss counter for this resolve stub
2633 INT32* counterAddr = &(cur_block->block[counter_index]);
2634 *counterAddr = STUB_MISS_COUNT_VALUE;
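// A condensed sketch of the lock-free slot allocation above (illustrative only;
// 'MAX' stands for counter_block::MAX_COUNTER_ENTRIES, and freeing the loser's
// block is assumed from the "lost a race" path):
//
//   for (;;) {
//       counter_block* cur = VolatileLoad(&m_cur_counter_block);
//       if (cur != NULL &&
//           (counter_index = InterlockedIncrement((LONG*)&cur->used) - 1) < MAX)
//           break;                                 // claimed a free counter slot
//       counter_block* pNew = new counter_block;   // current block exhausted
//       pNew->next = cur;
//       if (InterlockedCompareExchangeT(&m_cur_counter_block, pNew, cur) != cur)
//           delete pNew;                           // lost the race; retry with the winner's block
//   }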
2636 //allocate from the requisite heap and copy the templates for each piece over it.
2637 ResolveHolder * holder = (ResolveHolder*) (void*)
2638 resolve_heap->AllocAlignedMem(sizeof(ResolveHolder), CODE_SIZE_ALIGN);
2639 ExecutableWriterHolder<ResolveHolder> resolveWriterHolder(holder, sizeof(ResolveHolder));
2641 resolveWriterHolder.GetRW()->Initialize(holder,
2642 addrOfResolver, addrOfPatcher,
2643 dispatchToken, DispatchCache::HashToken(dispatchToken),
2644 g_resolveCache->GetCacheBaseAddr(), counterAddr
2645 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
2646 , stackArgumentsSize
2649 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2652 stats.stub_poly_counter++;
2653 stats.stub_space += sizeof(ResolveHolder)+sizeof(size_t);
2654 LOG((LF_STUBS, LL_INFO10000, "GenerateResolveStub for token" FMT_ADDR "at" FMT_ADDR "\n",
2655 DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
2657 #ifdef FEATURE_PERFMAP
2658 PerfMap::LogStubs(__FUNCTION__, "GenerateResolveStub", (PCODE)holder->stub(), holder->stub()->size());
2664 //----------------------------------------------------------------------------
2665 /* Generate a lookup stub for the given dispatchToken. addrOfResolver is where the stub always transfers control
2667 LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken)
2669 CONTRACT (LookupHolder*) {
2672 INJECT_FAULT(COMPlusThrowOM(););
2673 PRECONDITION(addrOfResolver != NULL);
2674 POSTCONDITION(CheckPointer(RETVAL));
2677 //allocate from the requisite heap and copy the template over it.
2678 LookupHolder * holder = (LookupHolder*) (void*) lookup_heap->AllocAlignedMem(sizeof(LookupHolder), CODE_SIZE_ALIGN);
2679 ExecutableWriterHolder<LookupHolder> lookupWriterHolder(holder, sizeof(LookupHolder));
2681 lookupWriterHolder.GetRW()->Initialize(holder, addrOfResolver, dispatchToken);
2682 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2685 stats.stub_lookup_counter++;
2686 stats.stub_space += sizeof(LookupHolder);
2687 LOG((LF_STUBS, LL_INFO10000, "GenerateLookupStub for token" FMT_ADDR "at" FMT_ADDR "\n",
2688 DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
2690 #ifdef FEATURE_PERFMAP
2691 PerfMap::LogStubs(__FUNCTION__, "GenerateLookupStub", (PCODE)holder->stub(), holder->stub()->size());
2697 //----------------------------------------------------------------------------
2698 /* Generate a cache entry
2700 ResolveCacheElem *VirtualCallStubManager::GenerateResolveCacheElem(void *addrOfCode,
2703 bool *pMayHaveReenteredCooperativeGCMode)
2709 INJECT_FAULT(COMPlusThrowOM(););
2710 PRECONDITION(pMayHaveReenteredCooperativeGCMode != nullptr);
2711 PRECONDITION(!*pMayHaveReenteredCooperativeGCMode);
2715 CONSISTENCY_CHECK(CheckPointer(pMTExpected));
2717 //allocate from the requisite heap and set the appropriate fields
2718 ResolveCacheElem *e = (ResolveCacheElem*) (void*)
2719 cache_entry_heap->AllocAlignedMem(sizeof(ResolveCacheElem), CODE_SIZE_ALIGN);
2721 e->pMT = pMTExpected;
2723 e->target = addrOfCode;
2727 #ifdef FEATURE_CODE_VERSIONING
2728 MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress((PCODE)addrOfCode);
2729 if (pMD->IsVersionableWithVtableSlotBackpatch())
2731 pMD->RecordAndBackpatchEntryPointSlot(
2734 EntryPointSlots::SlotType_Normal);
2736 // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
2737 *pMayHaveReenteredCooperativeGCMode = true;
2742 stats.cache_entry_counter++;
2743 stats.cache_entry_space += sizeof(ResolveCacheElem);
2748 //------------------------------------------------------------------
2749 // Adds the stub manager to our linked list of virtual stub managers
2750 // and adds to the global list.
2751 //------------------------------------------------------------------
2752 void VirtualCallStubManagerManager::AddStubManager(VirtualCallStubManager *pMgr)
2754 WRAPPER_NO_CONTRACT;
2756 SimpleWriteLockHolder lh(&m_RWLock);
2758 pMgr->m_pNext = m_pManagers;
2761 STRESS_LOG2(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
2762 "VirtualCallStubManagerManager::AddStubManager - 0x%p (vptr 0x%p)\n", pMgr, (*(PVOID*)pMgr));
2765 //------------------------------------------------------------------
2766 // Removes the stub manager from our linked list of virtual stub
2767 // managers and from the global list.
2768 //------------------------------------------------------------------
2769 void VirtualCallStubManagerManager::RemoveStubManager(VirtualCallStubManager *pMgr)
2780 SimpleWriteLockHolder lh(&m_RWLock);
2782 // Remove this manager from our list.
2783 for (VirtualCallStubManager **pCur = &m_pManagers;
2785 pCur = &((*pCur)->m_pNext))
2788 *pCur = (*pCur)->m_pNext;
2791 // Make sure we don't have a residual pointer left over.
2792 m_pCacheElem = NULL;
2794 STRESS_LOG1(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
2795 "VirtualCallStubManagerManager::RemoveStubManager - 0x%p\n", pMgr);
2798 //------------------------------------------------------------------
2799 // Logs stub usage statistics
2800 //------------------------------------------------------------------
2801 void VirtualCallStubManager::LogStats()
2803 STATIC_CONTRACT_NOTHROW;
2804 STATIC_CONTRACT_GC_NOTRIGGER;
2805 STATIC_CONTRACT_FORBID_FAULT;
2807 // Our Init routine assigns all fields atomically, so testing one field should suffice to
2808 // test whether the Init succeeded.
2814 // Temp space to use for formatting the output.
2815 static const int FMT_STR_SIZE = 160;
2816 char szPrintStr[FMT_STR_SIZE];
2819 if (g_hStubLogFile && (stats.site_write != 0))
2822 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", stats.site_counter);
2823 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2824 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write", stats.site_write);
2825 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2826 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", stats.site_write_mono);
2827 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2828 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", stats.site_write_poly);
2829 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2831 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub data\r\n");
2832 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2834 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", stats.stub_lookup_counter);
2835 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2836 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", stats.stub_mono_counter);
2837 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2838 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", stats.stub_poly_counter);
2839 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2840 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", stats.stub_space);
2841 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2844 g_resolveCache->GetLoadFactor(&total, &used);
2846 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_SIZE, "cache_entry_used", used);
2847 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2848 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", stats.cache_entry_counter);
2849 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2850 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", stats.cache_entry_space);
2851 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2853 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache_load:\t%zu used, %zu total, utilization %#5.2f%%\r\n",
2854 used, total, 100.0 * double(used) / double(total));
2855 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2858 resolvers->LogStats();
2859 dispatchers->LogStats();
2860 lookups->LogStats();
2861 vtableCallers->LogStats();
2862 cache_entries->LogStats();
2864 g_site_counter += stats.site_counter;
2865 g_stub_lookup_counter += stats.stub_lookup_counter;
2866 g_stub_poly_counter += stats.stub_poly_counter;
2867 g_stub_mono_counter += stats.stub_mono_counter;
2868 g_stub_vtable_counter += stats.stub_vtable_counter;
2869 g_site_write += stats.site_write;
2870 g_site_write_poly += stats.site_write_poly;
2871 g_site_write_mono += stats.site_write_mono;
2872 g_worker_call += stats.worker_call;
2873 g_worker_call_no_patch += stats.worker_call_no_patch;
2874 g_worker_collide_to_mono += stats.worker_collide_to_mono;
2875 g_stub_space += stats.stub_space;
2876 g_cache_entry_counter += stats.cache_entry_counter;
2877 g_cache_entry_space += stats.cache_entry_space;
2879 stats.site_counter = 0;
2880 stats.stub_lookup_counter = 0;
2881 stats.stub_poly_counter = 0;
2882 stats.stub_mono_counter = 0;
2883 stats.stub_vtable_counter = 0;
2884 stats.site_write = 0;
2885 stats.site_write_poly = 0;
2886 stats.site_write_mono = 0;
2887 stats.worker_call = 0;
2888 stats.worker_call_no_patch = 0;
2889 stats.worker_collide_to_mono = 0;
2890 stats.stub_space = 0;
2891 stats.cache_entry_counter = 0;
2892 stats.cache_entry_space = 0;
2895 void Prober::InitProber(size_t key1, size_t key2, size_t* table)
2907 base = &table[CALL_STUB_FIRST_INDEX];
2908 mask = table[CALL_STUB_MASK_INDEX];
2912 size_t Prober::Find()
2921 //if this prober has already visited every slot, there is nothing more to look at.
2922 //note, this means that if a prober is going to be reused, the FormHash() function
2923 //needs to be called to reset it.
2925 return CALL_STUB_EMPTY_ENTRY;
2930 //if we hit an empty entry, it means it cannot be in the table
2931 if(entry==CALL_STUB_EMPTY_ENTRY)
2933 return CALL_STUB_EMPTY_ENTRY;
2936 //we have a real entry, see if it is the one we want using our comparer
2937 comparer->SetContents(entry);
2938 if (comparer->Equals(keyA, keyB))
2942 } while(Next()); //Next() returns false when we have visited every slot
2943 return CALL_STUB_EMPTY_ENTRY;
2946 size_t Prober::Add(size_t newEntry)
2955 //if we have visited every slot then there is no room in the table to add this new entry
2957 return CALL_STUB_EMPTY_ENTRY;
2962 if (entry==CALL_STUB_EMPTY_ENTRY)
2964 //it's not in the table and we have the correct empty slot in hand
2965 //in which to add it.
2966 //try and grab it, if we succeed we break out to add the entry
2967 //if we fail, it means a racer swooped in and wrote into
2968 //this slot, so we will just keep looking
2969 if (GrabEntry(newEntry))
2974 // We didn't grab this entry, so keep trying.
2977 //check if this entry is already in the table, if so we are done
2978 comparer->SetContents(entry);
2979 if (comparer->Equals(keyA, keyB))
2983 } while(Next()); //Next() returns false when we have visited every slot
2985 //if we have visited every slot then there is no room in the table to add this new entry
2987 return CALL_STUB_EMPTY_ENTRY;
2989 CONSISTENCY_CHECK(Read() == newEntry);
2993 /*Atomically grab an entry, if it is empty, so we can write in it.
2994 @TODO: It is not clear if this routine is actually necessary and/or if the
2995 interlocked compare exchange is necessary as opposed to just a read write with racing allowed.
2996 If we didn't have it, all that would happen is potentially more duplicates or
2997 dropped entries, and we are supposed to run correctly even if they
2998 happen. So in a sense this is a perf optimization, whose value has
2999 not been measured, i.e. it might be faster without it.
3001 BOOL Prober::GrabEntry(size_t entryValue)
3003 LIMITED_METHOD_CONTRACT;
3005 return InterlockedCompareExchangeT(&base[index],
3006 entryValue, static_cast<size_t>(CALL_STUB_EMPTY_ENTRY)) == CALL_STUB_EMPTY_ENTRY;
3009 inline void FastTable::IncrementCount()
3011 LIMITED_METHOD_CONTRACT;
3013 // This MUST be an interlocked increment, since BucketTable::GetMoreSpace relies on
3014 // the return value of FastTable::isFull to tell it whether or not to continue with
3015 // trying to allocate a new FastTable. If two threads race and try to increment this
3016 // at the same time and one increment is lost, then the size will be inaccurate and
3017 // BucketTable::GetMoreSpace will never succeed, resulting in an infinite loop trying
3018 // to add a new entry.
3019 InterlockedIncrement((LONG *)&contents[CALL_STUB_COUNT_INDEX]);
3022 size_t FastTable::Add(size_t entry, Prober* probe)
3030 size_t result = probe->Add(entry);
3031 if (result == entry) IncrementCount();
3035 size_t FastTable::Find(Prober* probe)
3037 WRAPPER_NO_CONTRACT;
3039 return probe->Find();
3042 /*Increase the size of the bucket referenced by the prober p and copy the existing members into it.
3043 Since duplicates and lost entries are okay, we can build the larger table
3044 and then try to swap it in. If it turns out that somebody else is racing us,
3045 the worst that will happen is we drop a few entries on the floor, which is okay.
3046 If by chance we swap out a table that somebody else is inserting an entry into, that
3047 is okay too, just another dropped entry. If we detect dups, we just drop them on
3048 the floor. */
3049 BOOL BucketTable::GetMoreSpace(const Prober* p)
3054 MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3055 INJECT_FAULT(COMPlusThrowOM(););
3058 //get ahold of the current bucket
3059 Prober probe(p->comparer);
3060 size_t index = ComputeBucketIndex(p->keyA, p->keyB);
3062 FastTable* oldBucket = (FastTable*) Read(index);
3064 if (!oldBucket->isFull())
3068 //make a larger bucket
3070 if (oldBucket->tableSize() == CALL_STUB_MIN_ENTRIES)
3072 numEntries = CALL_STUB_SECONDARY_ENTRIES;
3076 numEntries = oldBucket->tableSize()*CALL_STUB_GROWTH_FACTOR;
3079 FastTable* newBucket = FastTable::MakeTable(numEntries);
3081 //copy via insertion from the old to the new bucket
3082 size_t* limit = &oldBucket->contents[(oldBucket->tableSize())+CALL_STUB_FIRST_INDEX];
3084 for (e = &oldBucket->contents[CALL_STUB_FIRST_INDEX]; e<limit; e++)
3087 if (moved == CALL_STUB_EMPTY_ENTRY)
3091 probe.comparer->SetContents(moved);
3092 probe.InitProber(probe.comparer->KeyA(), probe.comparer->KeyB(), &newBucket->contents[0]);
3093 //if the new bucket fills up, give up (this should never happen I think)
3094 if (newBucket->Add(moved, &probe) == CALL_STUB_EMPTY_ENTRY)
3096 _ASSERTE(!"This should never happen");
3101 // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
3102 // replacing the entry, then we will just put the new bucket we just created in the
3103 // dead list instead of risking a race condition which would put a duplicate of the old
3104 // bucket in the dead list (and even possibly cause a cyclic list).
3105 if (InterlockedCompareExchangeT(reinterpret_cast<FastTable * volatile *>(&buckets[index]), newBucket, oldBucket) != oldBucket)
3106 oldBucket = newBucket;
3108 // Link the old onto the "to be reclaimed" list.
3109 // Use the dead link field of the abandoned buckets to form the list
3112 list = VolatileLoad(&dead);
3113 oldBucket->contents[CALL_STUB_DEAD_LINK] = (size_t) list;
3114 } while (InterlockedCompareExchangeT(&dead, oldBucket, list) != list);
3118 // Validate correctness of the list
3119 FastTable *curr = oldBucket;
3122 FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
3126 next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
3127 _ASSERTE(curr != next); // Make sure we don't have duplicates
3128 _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
3135 //update our counters
3136 stats.bucket_space_dead += UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3137 stats.bucket_space -= UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3138 stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3142 void BucketTable::Reclaim()
3153 //reclaim the dead (abandoned) buckets on the dead list
3154 // The key issue is to not reclaim the list if any thread is in a stub or
3155 // if any thread is accessing (read or write) the cache tables. So we will declare
3156 // those points to be non-gc safe points, and reclaim when the gc syncs the threads
3157 //@TODO: add an assert to ensure we are at a gc safe point
3158 FastTable* list = dead;
3160 //see if there is anything to do.
3161 //We ignore the race, since we will just pick them up on the next go around
3162 if (list == NULL) return;
3164 //Try and grab the list exclusively, if we fail, it means that either somebody
3165 //else grabbed it, or something got added. In either case we just give up and assume
3166 //we will catch it on the next go around.
3167 //we use an interlock here in case we are called during shutdown not at a gc safe point
3168 //in which case the race is between several threads wanting to reclaim.
3169 //We are assuming that actually having to do anything is rare,
3170 //so that the interlocked overhead is acceptable. If this is not true, then
3171 //we need to examine exactly how and when we may be called during shutdown.
3172 if (InterlockedCompareExchangeT(&dead, NULL, list) != list)
3176 // Validate correctness of the list
3177 FastTable *curr = list;
3180 FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
3184 next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
3185 _ASSERTE(curr != next); // Make sure we don't have duplicates
3186 _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
3192 //we now have the list to ourselves, so we can just walk it and clean up
3195 size_t next = list->contents[CALL_STUB_DEAD_LINK];
3197 list = (FastTable*) next;
3202 // When using SetUpProber the proper values to use for keyA, keyB are:
3204 //      table            keyA         keyB
3205 //-------------------------------------------------------
3206 //      lookups          token        the stub calling convention
3207 //      dispatchers      token        the expected MT
3208 //      resolver         token        the stub calling convention
3209 //      cache_entries    token        the expected method table
3210 //      vtableCallers    token        unused (zero)
3212 BOOL BucketTable::SetUpProber(size_t keyA, size_t keyB, Prober *prober)
3217 MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3218 INJECT_FAULT(COMPlusThrowOM(););
3221 // The buckets[index] table starts off initialized to all CALL_STUB_EMPTY_ENTRY
3222 // and we should write each buckets[index] exactly once. However in a multi-proc
3223 // scenario each processor could see old memory values that would cause us to
3224 // needlessly allocate a FastTable that another processor has already created.
3226 // Since this is a fairly hot code path and it is very rare for buckets[index]
3227 // to be CALL_STUB_EMPTY_ENTRY, we can first try a non-volatile read and then
3228 // if it looks like we need to create a new FastTable we double check by doing
3229 // a volatile read.
3231 // Note that BucketTable::GetMoreSpace also updates buckets[index] when the FastTable
3232 // grows to 90% full. (CALL_STUB_LOAD_FACTOR is 90%)
3234 size_t index = ComputeBucketIndex(keyA, keyB);
3235 size_t bucket = buckets[index]; // non-volatile read
3236 if (bucket==CALL_STUB_EMPTY_ENTRY)
3238 bucket = Read(index); // volatile read
3241 if (bucket==CALL_STUB_EMPTY_ENTRY)
3243 FastTable* newBucket = FastTable::MakeTable(CALL_STUB_MIN_ENTRIES);
3245 // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
3246 // replacing the entry, then we will free the new bucket we just created.
3247 bucket = InterlockedCompareExchangeT(&buckets[index], reinterpret_cast<size_t>(newBucket), static_cast<size_t>(CALL_STUB_EMPTY_ENTRY));
3248 if (bucket == CALL_STUB_EMPTY_ENTRY)
3250 // We successfully wrote newBucket into buckets[index], overwriting the CALL_STUB_EMPTY_ENTRY value
3251 stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3252 bucket = (size_t) newBucket;
3256 // Someone else wrote buckets[index] before us
3257 // and bucket contains the value that they wrote
3258 // We must free the memory that we allocated
3259 // and we will use the value that someone else wrote
3261 newBucket = (FastTable*) bucket;
3265 return ((FastTable*)(bucket))->SetUpProber(keyA, keyB, prober);
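// An illustrative use of the prober protocol (a sketch mirroring the call sites in
// ResolveWorker above; 'entries' stands for any of the BucketTables such as
// 'dispatchers' or 'resolvers', and 'Entry'/'newValue' are hypothetical placeholders):
//
//   Entry entry;                        // the table-specific entry type
//   Prober probe(&entry);
//   if (entries->SetUpProber(token.To_SIZE_T(), (size_t)pMT, &probe))
//   {
//       size_t found = entries->Find(&probe);
//       if (found == CALL_STUB_EMPTY_ENTRY)
//           found = entries->Add(newValue, &probe);  // CALL_STUB_EMPTY_ENTRY on failure
//   }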
3268 size_t BucketTable::Add(size_t entry, Prober* probe)
3273 MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3274 INJECT_FAULT(COMPlusThrowOM(););
3277 FastTable* table = (FastTable*)(probe->items());
3278 size_t result = table->Add(entry,probe);
3279 if (result != CALL_STUB_EMPTY_ENTRY)
3283 //we must have missed count(s) and the table is now full, so let's
3284 //grow and retry (this should be rare)
3285 if (!GetMoreSpace(probe)) return CALL_STUB_EMPTY_ENTRY;
3286 if (!SetUpProber(probe->keyA, probe->keyB, probe)) return CALL_STUB_EMPTY_ENTRY;
3287 return Add(entry, probe); //recurse in for the retry to write the entry
3290 void BucketTable::LogStats()
3292 LIMITED_METHOD_CONTRACT;
3295 g_bucket_space += stats.bucket_space;
3296 g_bucket_space_dead += stats.bucket_space_dead;
3298 stats.bucket_space = 0;
3299 stats.bucket_space_dead = 0;
3302 DispatchCache::DispatchCache()
3304 : m_writeLock(CrstStubDispatchCache, CRST_UNSAFE_ANYMODE)
3311 INJECT_FAULT(COMPlusThrowOM());
3315 //initialize the cache to be empty, i.e. all slots point to the empty entry
3316 ResolveCacheElem* e = new ResolveCacheElem();
3317 e->pMT = (void *) (-1); //force all method tables to be misses
3318 e->pNext = NULL; // null terminate the chain for the empty entry
3320 for (int i = 0;i<CALL_STUB_CACHE_SIZE;i++)
3323 // Initialize statistics
3324 memset(&stats, 0, sizeof(stats));
3326 memset(&cacheData, 0, sizeof(cacheData));
3330 ResolveCacheElem* DispatchCache::Lookup(size_t token, UINT16 tokenHash, void* mt)
3332 WRAPPER_NO_CONTRACT;
3333 if (tokenHash == INVALID_HASH)
3334 tokenHash = HashToken(token);
3335 UINT16 idx = HashMT(tokenHash, mt);
3336 ResolveCacheElem *pCurElem = GetCacheEntry(idx);
3338 #if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3339 BOOL chainedLookup = FALSE;
3341 // No need to conditionalize on CHAIN_LOOKUP, since this loop
3342 // will only run once when CHAIN_LOOKUP is undefined, since
3343 // there will only ever be one element in a bucket (chain of 1).
3344 while (pCurElem != empty) {
3345 if (pCurElem->Equals(token, mt)) {
3346 return pCurElem;
3347 }
3348 #if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3349 // Only want to inc the counter once per chain search.
3350 if (pCurElem == GetCacheEntry(idx)) {
3351 chainedLookup = TRUE;
3352 g_chained_lookup_external_call_counter++;
3353 }
3354 #endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3355 pCurElem = pCurElem->Next();
3356 }
3357 #if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3358 if (chainedLookup) {
3359 g_chained_lookup_external_miss_counter++;
3360 }
3361 #endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3362 return NULL; // no matching entry was found
3363 }
3365 // returns true if we wrote the resolver cache entry with the new elem
3366 // also returns true if the cache entry already contained elem (the miss case)
3368 BOOL DispatchCache::Insert(ResolveCacheElem* elem, InsertKind insertKind)
3374 PRECONDITION(insertKind != IK_NONE);
3378 CrstHolder lh(&m_writeLock);
3381 // Figure out what bucket this element belongs in
3382 UINT16 tokHash = HashToken(elem->token);
3383 UINT16 hash = HashMT(tokHash, elem->pMT);
3384 UINT16 idx = hash;
3385 BOOL write = FALSE;
3386 BOOL hit = FALSE;
3387 BOOL miss = FALSE;
3388 BOOL collide = FALSE;
3390 #ifdef _DEBUG
3391 elem->debug_hash = tokHash;
3392 elem->debug_index = idx;
3393 #endif // _DEBUG
3395 ResolveCacheElem* cell = GetCacheEntry(idx);
3397 #ifdef CHAIN_LOOKUP
3398 // There is the possibility of a race where two threads will
3399 // try to generate a ResolveCacheElem for the same tuple. The
3400 // first thread will get the lock and insert the element; the
3401 // second thread coming in should detect this and not
3402 // re-add the element, since it is already likely at the start
3403 // of the list, and re-adding would result in the element looping to
3404 // itself.
3405 if (Lookup(elem->token, tokHash, elem->pMT))
3406 #else // !CHAIN_LOOKUP
3407 if (cell == elem)
3408 #endif // !CHAIN_LOOKUP
3409 {
3410 // The cache entry already contains elem: record it as a miss
3411 miss = TRUE;
3412 }
3413 else if (cell == empty)
3414 {
3415 // The cache entry was unused: record it as a hit
3416 hit = TRUE;
3417 }
3421 CONSISTENCY_CHECK(!(hit && miss));
3423 // If we didn't have a miss or a hit then we had a collision with
3424 // a non-empty entry in our resolver cache
3425 if (!hit && !miss)
3426 {
3427 collide = TRUE;
3428 }
3429 #ifdef CHAIN_LOOKUP
3430 // Always insert the entry into the chain
3431 write = TRUE;
3432 #else // !CHAIN_LOOKUP
3434 if (STUB_COLLIDE_WRITE_PCT < 100)
3435 {
3436 UINT32 coin = UINT32(GetRandomInt(100));
3438 write = (coin < STUB_COLLIDE_WRITE_PCT);
3439 }
3440 else
3441 {
3442 write = TRUE;
3443 }
3445 #endif // !CHAIN_LOOKUP
3447 if (write)
3448 {
3449 #ifdef CHAIN_LOOKUP
3451 // We create a list with the last pNext pointing at empty
3452 elem->pNext = GetCacheEntry(idx);
3453 #else // !CHAIN_LOOKUP
3454 elem->pNext = empty;
3455 #endif // !CHAIN_LOOKUP
3456 SetCacheEntry(idx, elem);
3457 stats.insert_cache_write++;
3458 }
3460 LOG((LF_STUBS, LL_INFO1000, "%8s Insert(token" FMT_ADDR "MethodTable" FMT_ADDR ") at [%03x] %7s %5s \n",
3461 (insertKind == IK_DISPATCH) ? "Dispatch" : (insertKind == IK_RESOLVE) ? "Resolve" : "External",
3462 DBG_ADDR(elem->token), DBG_ADDR(elem->pMT), hash,
3463 hit ? "HIT" : miss ? "MISS" : "COLLIDE", write ? "WRITE" : "KEEP"));
3465 if (insertKind == IK_DISPATCH)
3466 stats.insert_cache_dispatch++;
3467 else if (insertKind == IK_RESOLVE)
3468 stats.insert_cache_resolve++;
3469 else if (insertKind == IK_SHARED)
3470 stats.insert_cache_shared++;
3471 else if (insertKind == IK_EXTERNAL)
3472 stats.insert_cache_external++;
3474 if (hit)
3475 stats.insert_cache_hit++;
3476 else if (miss)
3477 stats.insert_cache_miss++;
3478 else
3479 stats.insert_cache_collide++;
3481 return write || miss;
3482 }
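// Design note: on a collision (non-CHAIN_LOOKUP builds) the new element only
// replaces the old one STUB_COLLIDE_WRITE_PCT percent of the time, so two hot
// entries that hash to the same bucket cannot endlessly evict each other. The
// coin flip in isolation (a sketch using the same names as above):
//
//     BOOL ShouldWrite()
//     {
//         if (STUB_COLLIDE_WRITE_PCT >= 100)
//             return TRUE;                          // always write
//         UINT32 coin = UINT32(GetRandomInt(100));  // uniform over [0, 100)
//         return coin < STUB_COLLIDE_WRITE_PCT;     // TRUE with probability STUB_COLLIDE_WRITE_PCT%
//     }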
3484 #ifdef CHAIN_LOOKUP
3485 void DispatchCache::PromoteChainEntry(ResolveCacheElem* elem)
3493 CrstHolder lh(&m_writeLock);
3494 g_chained_entry_promoted++;
3496 // Figure out what bucket this element belongs in
3497 UINT16 tokHash = HashToken(elem->token);
3498 UINT16 hash = HashMT(tokHash, elem->pMT);
3501 ResolveCacheElem *curElem = GetCacheEntry(idx);
3503 // If someone raced in and promoted this element before us,
3504 // then we can just return. Furthermore, it would be an
3505 // error if we performed the below code, since we'd end up
3506 // with a self-referential element and an infinite loop.
3507 if (curElem == elem)
3508 {
3509 return;
3510 }
3512 // Now loop through the chain to find the element that is
3513 // pointing to the element we're promoting, so we can remove
3514 // it from the chain.
3515 while (curElem->Next() != elem)
3516 {
3517 curElem = curElem->pNext;
3518 CONSISTENCY_CHECK(curElem != NULL);
3519 }
3521 // Remove the element from the chain
3522 CONSISTENCY_CHECK(curElem->pNext == elem);
3523 curElem->pNext = elem->pNext;
3525 // Set the promoted entry to the head of the list.
3526 elem->pNext = GetCacheEntry(idx);
3527 SetCacheEntry(idx, elem);
3528 }
3529 #endif // CHAIN_LOOKUP
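// The promotion above is a classic move-to-front on a singly linked list: find
// the predecessor, splice the node out, and relink it at the head. Standalone
// sketch (hypothetical Node type; the real head is the cache bucket itself):
//
//     struct Node { Node* pNext; };
//
//     void MoveToFront(Node** ppHead, Node* n)
//     {
//         if (*ppHead == n)
//             return;                      // already at the front; relinking would self-loop
//         Node* prev = *ppHead;
//         while (prev->pNext != n)         // find n's predecessor
//             prev = prev->pNext;
//         prev->pNext = n->pNext;          // unlink n
//         n->pNext = *ppHead;              // relink n at the head
//         *ppHead = n;
//     }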
3531 void DispatchCache::LogStats()
3533 LIMITED_METHOD_CONTRACT;
3535 g_insert_cache_external += stats.insert_cache_external;
3536 g_insert_cache_shared += stats.insert_cache_shared;
3537 g_insert_cache_dispatch += stats.insert_cache_dispatch;
3538 g_insert_cache_resolve += stats.insert_cache_resolve;
3539 g_insert_cache_hit += stats.insert_cache_hit;
3540 g_insert_cache_miss += stats.insert_cache_miss;
3541 g_insert_cache_collide += stats.insert_cache_collide;
3542 g_insert_cache_write += stats.insert_cache_write;
3544 stats.insert_cache_external = 0;
3545 stats.insert_cache_shared = 0;
3546 stats.insert_cache_dispatch = 0;
3547 stats.insert_cache_resolve = 0;
3548 stats.insert_cache_hit = 0;
3549 stats.insert_cache_miss = 0;
3550 stats.insert_cache_collide = 0;
3551 stats.insert_cache_write = 0;
3552 }
3554 /* The following tables have 12-bit entries with these properties:
3555 1. Each entry has 5, 6, or 7 one bits and 5, 6, or 7 zero bits.
3556 2. Each bit position is a one in roughly half of the entries.
3557 3. Adjacent entries, when xor-ed, differ in 5, 6, or 7 bit positions.
3558 */
3559 #ifdef HOST_64BIT
3560 static const UINT16 tokenHashBits[64] =
3561 #else // !HOST_64BIT
3562 static const UINT16 tokenHashBits[32] =
3563 #endif // !HOST_64BIT
3564 {
3565 0xcd5, 0x8b9, 0x875, 0x439,
3566 0xbf0, 0x38d, 0xa5b, 0x6a7,
3567 0x78a, 0x9c8, 0xee2, 0x3d3,
3568 0xd94, 0x54e, 0x698, 0xa6a,
3569 0x753, 0x932, 0x4b7, 0x155,
3570 0x3a7, 0x9c8, 0x4e9, 0xe0b,
3571 0xf05, 0x994, 0x472, 0x626,
3572 0x15c, 0x3a8, 0x56e, 0xe2d,
3574 #if defined(HOST_64BIT)
3575 0xe3c, 0xbe2, 0x58e, 0x0f3,
3576 0x54d, 0x70f, 0xf88, 0xe2b,
3577 0x353, 0x153, 0x4a5, 0x943,
3578 0xaf2, 0x88f, 0x72e, 0x978,
3579 0xa13, 0xa0b, 0xc3c, 0xb72,
3580 0x0f7, 0x49a, 0xdd0, 0x366,
3581 0xd84, 0xba5, 0x4c5, 0x6bc,
3582 0x8ec, 0x0b9, 0x617, 0x85c,
3583 #endif // HOST_64BIT
3584 };
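// The three properties above can be sanity-checked mechanically. A sketch of such
// a check (illustrative only; assumes C++20 <bit> for std::popcount):
//
//     #include <bit>
//     #include <cassert>
//
//     void CheckTokenHashBits(const UINT16* bits, int count)
//     {
//         for (int i = 0; i < count; i++)
//         {
//             int ones = std::popcount((unsigned)bits[i]);
//             assert(5 <= ones && ones <= 7);                     // property 1
//             if (i > 0)
//             {
//                 int diff = std::popcount((unsigned)(bits[i] ^ bits[i - 1]));
//                 assert(5 <= diff && diff <= 7);                 // property 3
//             }
//         }
//         // Property 2 would tally each of the 12 bit positions across all
//         // entries and expect a count near count/2 in each position.
//     }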
3586 /*static*/ UINT16 DispatchCache::HashToken(size_t token)
3588 LIMITED_METHOD_CONTRACT;
3590 UINT16 hash = 0;
3591 int index = 0;
3593 // Note: if you change the number of bits in CALL_STUB_CACHE_NUM_BITS
3594 // then we have to recompute the hash function;
3595 // though making the number of bits smaller should still be OK.
3596 static_assert_no_msg(CALL_STUB_CACHE_NUM_BITS <= 12);
3598 while (token)
3599 {
3600 if (token & 1)
3601 hash ^= tokenHashBits[index];
3603 index++;
3604 token >>= 1;
3605 }
3606 _ASSERTE((hash & ~CALL_STUB_CACHE_MASK) == 0);
3607 return hash;
3608 }
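// Worked example (illustrative): for token = 0b1011 (bits 0, 1, and 3 set), the
// loop xors together the table entries at those bit positions:
//
//     hash = tokenHashBits[0] ^ tokenHashBits[1] ^ tokenHashBits[3]
//          = 0xcd5 ^ 0x8b9 ^ 0x439
//          = 0x055
//
// Since every table entry fits in 12 bits, any xor of entries also fits in 12
// bits, which is why the result stays within CALL_STUB_CACHE_MASK when
// CALL_STUB_CACHE_NUM_BITS is 12 (the maximum the static_assert allows).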
3610 /////////////////////////////////////////////////////////////////////////////////////////////
3611 DispatchCache::Iterator::Iterator(DispatchCache *pCache) : m_pCache(pCache), m_curBucket(-1)
3616 PRECONDITION(CheckPointer(pCache));
3619 // Move to the first valid entry
3620 NextValidBucket();
3621 }
3623 /////////////////////////////////////////////////////////////////////////////////////////////
3624 void DispatchCache::Iterator::Next()
3635 // Move to the next element in the chain
3636 m_ppCurElem = &((*m_ppCurElem)->pNext);
3638 // If the next element was the empty sentinel entry, move to the next valid bucket.
3639 if (*m_ppCurElem == m_pCache->empty) {
3640 NextValidBucket();
3641 }
3642 }
3644 /////////////////////////////////////////////////////////////////////////////////////////////
3645 // This doesn't actually delete the entry, it just unlinks it from the chain.
3646 // Returns the unlinked entry.
3647 ResolveCacheElem *DispatchCache::Iterator::UnlinkEntry()
3652 CONSISTENCY_CHECK(IsValid());
3654 ResolveCacheElem *pUnlinkedEntry = *m_ppCurElem;
3655 *m_ppCurElem = (*m_ppCurElem)->pNext;
3656 pUnlinkedEntry->pNext = m_pCache->empty;
3657 // If unlinking this entry took us to the end of this bucket, we need to move to the next one.
3658 if (*m_ppCurElem == m_pCache->empty) {
3659 NextValidBucket();
3660 }
3661 return pUnlinkedEntry;
3662 }
3664 /////////////////////////////////////////////////////////////////////////////////////////////
3665 void DispatchCache::Iterator::NextValidBucket()
3670 CONSISTENCY_CHECK(IsValid());
3673 // Move to the next bucket that contains a cache entry
3674 do {
3675 NextBucket();
3676 } while (IsValid() && *m_ppCurElem == m_pCache->empty);
3677 }
3679 #endif // !DACCESS_COMPILE
3681 /////////////////////////////////////////////////////////////////////////////////////////////
3682 VirtualCallStubManager *VirtualCallStubManagerManager::FindVirtualCallStubManager(PCODE stubAddress)
3691 return VirtualCallStubManager::FindStubManager(stubAddress);
3692 }
3694 static VirtualCallStubManager * const IT_START = (VirtualCallStubManager *)(-1);
3696 /////////////////////////////////////////////////////////////////////////////////////////////
3697 // Move to the next element. Iterators are created at
3698 // start-1, so must call Next before using Current
3699 BOOL VirtualCallStubManagerIterator::Next()
3701 LIMITED_METHOD_DAC_CONTRACT;
3703 if (m_fIsStart)
3704 {
3705 m_fIsStart = FALSE;
3706 }
3707 else if (m_pCurMgr != NULL)
3708 {
3709 m_pCurMgr = m_pCurMgr->m_pNext;
3710 }
3712 return (m_pCurMgr != NULL);
3713 }
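// Typical use of the iterator; note the start-1 convention, which means Next()
// must be called once before the first Current():
//
//     VirtualCallStubManagerIterator it =
//         VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
//     while (it.Next())
//     {
//         VirtualCallStubManager* pMgr = it.Current();
//         // ... use pMgr ...
//     }
//
// DoEnumMemoryRegions below follows exactly this pattern.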
3715 /////////////////////////////////////////////////////////////////////////////////////////////
3716 // Get the current contents of the iterator
3717 VirtualCallStubManager *VirtualCallStubManagerIterator::Current()
3719 LIMITED_METHOD_DAC_CONTRACT;
3720 CONSISTENCY_CHECK(!m_fIsStart);
3721 CONSISTENCY_CHECK(CheckPointer(m_pCurMgr));
3723 return m_pCurMgr;
3724 }
3726 #ifndef DACCESS_COMPILE
3727 /////////////////////////////////////////////////////////////////////////////////////////////
3728 VirtualCallStubManagerManager::VirtualCallStubManagerManager()
3729 : m_pManagers(NULL),
3731 m_RWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT)
3733 LIMITED_METHOD_CONTRACT;
3734 }
3736 /////////////////////////////////////////////////////////////////////////////////////////////
3738 void VirtualCallStubManagerManager::InitStatic()
3740 STANDARD_VM_CONTRACT;
3742 CONSISTENCY_CHECK(g_pManager == NULL);
3743 g_pManager = new VirtualCallStubManagerManager();
3744 }
3747 /////////////////////////////////////////////////////////////////////////////////////////////
3748 VirtualCallStubManagerIterator VirtualCallStubManagerManager::IterateVirtualCallStubManagers()
3750 WRAPPER_NO_CONTRACT;
3753 VirtualCallStubManagerIterator it(VirtualCallStubManagerManager::GlobalManager());
3754 return it;
3755 }
3757 /////////////////////////////////////////////////////////////////////////////////////////////
3758 BOOL VirtualCallStubManagerManager::CheckIsStub_Internal(
3759 PCODE stubStartAddress)
3761 WRAPPER_NO_CONTRACT;
3764 // Forwarded to here from RangeSectionStubManager
3765 return FALSE;
3766 }
3768 /////////////////////////////////////////////////////////////////////////////////////////////
3769 BOOL VirtualCallStubManagerManager::DoTraceStub(
3770 PCODE stubStartAddress,
3771 TraceDestination *trace)
3773 WRAPPER_NO_CONTRACT;
3775 // Find the owning manager. We should succeed, since presumably someone already
3776 // called CheckIsStub on us to find out that we own the address, and already
3777 // called TraceManager to initiate a trace.
3778 VirtualCallStubManager *pMgr = FindVirtualCallStubManager(stubStartAddress);
3779 CONSISTENCY_CHECK(CheckPointer(pMgr));
3781 return pMgr->DoTraceStub(stubStartAddress, trace);
3782 }
3784 #ifndef DACCESS_COMPILE
3785 /////////////////////////////////////////////////////////////////////////////////////////////
3786 MethodDesc *VirtualCallStubManagerManager::Entry2MethodDesc(
3787 PCODE stubStartAddress,
3788 MethodTable *pMT)
3794 INJECT_FAULT(COMPlusThrowOM(););
3801 StubCodeBlockKind sk = STUB_CODE_BLOCK_UNKNOWN;
3803 // Find the owning manager.
3804 VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(stubStartAddress, &sk);
3805 if (pMgr == NULL)
3806 return NULL;
3808 // Do the full resolve
3809 DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pMgr, stubStartAddress, sk));
3811 PCODE target = NULL;
3812 // TODO: passing NULL as protectedObj here can lead to incorrect behavior for ICastable objects
3813 // We need to review if this is the case and refactor this code if we want ICastable to become officially supported
3814 VirtualCallStubManager::Resolver(pMT, token, NULL, &target, TRUE /* throwOnConflict */);
3816 return pMT->GetMethodDescForSlotAddress(target);
3817 }
3818 #endif // DACCESS_COMPILE
3820 #ifdef DACCESS_COMPILE
3821 void VirtualCallStubManagerManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
3824 WRAPPER_NO_CONTRACT;
3825 VirtualCallStubManagerIterator it = IterateVirtualCallStubManagers();
3826 while (it.Next())
3827 {
3828 it.Current()->DoEnumMemoryRegions(flags);
3829 }
3830 }
3831 #endif // DACCESS_COMPILE
3833 //----------------------------------------------------------------------------
3834 BOOL VirtualCallStubManagerManager::TraceManager(
3835 Thread *thread, TraceDestination *trace,
3836 T_CONTEXT *pContext, BYTE **pRetAddr)
3838 WRAPPER_NO_CONTRACT;
3840 // Find the owning manager. We should succeed, since presumably someone already
3841 // called CheckIsStub on us to find out that we own the address.
3842 VirtualCallStubManager *pMgr = FindVirtualCallStubManager(GetIP(pContext));
3843 CONSISTENCY_CHECK(CheckPointer(pMgr));
3845 // Forward the call to the appropriate manager.
3846 return pMgr->TraceManager(thread, trace, pContext, pRetAddr);
3847 }