1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 //
4 // File: VirtualCallStub.CPP
5 //
6 // This file contains the virtual call stub manager and caches
7 //
8
9
10
11 //
12
13 //
14 // ============================================================================
15
16 #include "common.h"
17 #include "array.h"
18
19 #ifdef FEATURE_PERFMAP
20 #include "perfmap.h"
21 #endif
22
23 #ifndef DACCESS_COMPILE
24
25 //@TODO: make these conditional on whether logs are being produced
26 //instrumentation counters
27 UINT32 g_site_counter = 0;              //# of call sites
28 UINT32 g_site_write = 0;                //# of call site backpatch writes
29 UINT32 g_site_write_poly = 0;           //# of call site backpatch writes to point to resolve stubs
30 UINT32 g_site_write_mono = 0;           //# of call site backpatch writes to point to dispatch stubs
31
32 UINT32 g_stub_lookup_counter = 0;       //# of lookup stubs
33 UINT32 g_stub_mono_counter = 0;         //# of dispatch stubs
34 UINT32 g_stub_poly_counter = 0;         //# of resolve stubs
35 UINT32 g_stub_vtable_counter = 0;       //# of vtable call stubs
36 UINT32 g_stub_space = 0;                //# of bytes of stubs
37
38 UINT32 g_reclaim_counter = 0;           //# of times a ReclaimAll was performed
39
40 UINT32 g_worker_call = 0;               //# of calls into ResolveWorker
41 UINT32 g_worker_call_no_patch = 0;
42 UINT32 g_worker_collide_to_mono = 0;    //# of times we converted a poly stub to a mono stub instead of writing the cache entry
43
44 UINT32 g_external_call = 0;             //# of calls into GetTarget(token, pMT)
45 UINT32 g_external_call_no_patch = 0;
46
47 UINT32 g_insert_cache_external = 0;     //# of times Insert was called for IK_EXTERNAL
48 UINT32 g_insert_cache_shared = 0;       //# of times Insert was called for IK_SHARED
49 UINT32 g_insert_cache_dispatch = 0;     //# of times Insert was called for IK_DISPATCH
50 UINT32 g_insert_cache_resolve = 0;      //# of times Insert was called for IK_RESOLVE
51 UINT32 g_insert_cache_hit = 0;          //# of times Insert found an empty cache entry
52 UINT32 g_insert_cache_miss = 0;         //# of times Insert already had a matching cache entry
53 UINT32 g_insert_cache_collide = 0;      //# of times Insert found a used cache entry
54 UINT32 g_insert_cache_write = 0;        //# of times Insert wrote a cache entry
55
56 UINT32 g_cache_entry_counter = 0;       //# of cache structs
57 UINT32 g_cache_entry_space = 0;         //# of bytes used by cache lookup structs
58
59 UINT32 g_call_lookup_counter = 0;       //# of times lookup stubs entered
60
61 UINT32 g_mono_call_counter = 0;         //# of times dispatch stubs entered
62 UINT32 g_mono_miss_counter = 0;         //# of times expected MT did not match actual MT (dispatch stubs)
63
64 UINT32 g_poly_call_counter = 0;         //# of times resolve stubs entered
65 UINT32 g_poly_miss_counter = 0;         //# of times cache missed (resolve stub)
66
67 UINT32 g_chained_lookup_call_counter = 0;   //# of hits in a chained lookup
68 UINT32 g_chained_lookup_miss_counter = 0;   //# of misses in a chained lookup
69
70 UINT32 g_chained_lookup_external_call_counter = 0;   //# of hits in an external chained lookup
71 UINT32 g_chained_lookup_external_miss_counter = 0;   //# of misses in an external chained lookup
72
73 UINT32 g_chained_entry_promoted = 0;    //# of times a cache entry is promoted to the start of the chain
74
75 UINT32 g_bucket_space = 0;              //# of bytes in caches and tables, not including the stubs themselves
76 UINT32 g_bucket_space_dead = 0;         //# of bytes of abandoned buckets not yet recycled
77
78 #endif // !DACCESS_COMPILE
79
80 // This is the number of times a successful chain lookup will occur before the
81 // entry is promoted to the front of the chain. This is declared as extern because
82 // the default value (CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT) is defined in the header.
83 #ifdef TARGET_ARM64
84 extern "C" size_t g_dispatch_cache_chain_success_counter;
85 #else
86 extern size_t g_dispatch_cache_chain_success_counter;
87 #endif
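// A minimal sketch of how this counter is meant to be used (illustrative only; the real
// bookkeeping lives in the resolve stub code and in PromoteChainEntry below, the constants
// come from virtualcallstub.h, and pElem stands in for the cache element that just matched):
#if 0
    // On a successful chained lookup, count down; when the countdown expires, move the
    // entry that keeps hitting to the front of its hash chain. (The exact reset/backoff
    // policy may differ from this sketch.)
    if (--g_dispatch_cache_chain_success_counter == 0)
    {
        VirtualCallStubManager::PromoteChainEntry(pElem);
        g_dispatch_cache_chain_success_counter = CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT;
    }
#endif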
88
89 #define DECLARE_DATA
90 #include "virtualcallstub.h"
91 #undef DECLARE_DATA
92 #include "profilepriv.h"
93 #include "contractimpl.h"
94 #include "dynamicinterfacecastable.h"
95
96 SPTR_IMPL_INIT(VirtualCallStubManagerManager, VirtualCallStubManagerManager, g_pManager, NULL);
97
98 #ifndef DACCESS_COMPILE
99
100 #ifdef STUB_LOGGING
101 UINT32 STUB_MISS_COUNT_VALUE = 100;
102 UINT32 STUB_COLLIDE_WRITE_PCT = 100;
103 UINT32 STUB_COLLIDE_MONO_PCT  =   0;
104 #endif // STUB_LOGGING
105
106 FastTable::NumCallStubs_t FastTable::NumCallStubs;
107
108 FastTable* BucketTable::dead = NULL;    //linked list of the abandoned buckets
109
110 DispatchCache *g_resolveCache = NULL;    //cache of dispatch stubs for in line lookup by resolve stubs.
111
112 size_t g_dispatch_cache_chain_success_counter = CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT;
113
114 #ifdef STUB_LOGGING
115 UINT32 g_resetCacheCounter;
116 UINT32 g_resetCacheIncr;
117 UINT32 g_dumpLogCounter;
118 UINT32 g_dumpLogIncr;
119 #endif // STUB_LOGGING
120
121 //@TODO: use the existing logging mechanisms.  for now we write to a file.
122 HANDLE g_hStubLogFile;
123
124 void VirtualCallStubManager::StartupLogging()
125 {
126     CONTRACTL
127     {
128         NOTHROW;
129         GC_TRIGGERS;
130         FORBID_FAULT;
131     }
132     CONTRACTL_END
133
134     GCX_PREEMP();
135
136     EX_TRY
137     {
138         FAULT_NOT_FATAL(); // We handle file creation problems locally
139         SString str;
140         str.Printf("StubLog_%d.log", GetCurrentProcessId());
141         g_hStubLogFile = WszCreateFile (str.GetUnicode(),
142                                         GENERIC_WRITE,
143                                         0,
144                                         0,
145                                         CREATE_ALWAYS,
146                                         FILE_ATTRIBUTE_NORMAL,
147                                         0);
148     }
149     EX_CATCH
150     {
151     }
152     EX_END_CATCH(SwallowAllExceptions)
153
154     if (g_hStubLogFile == INVALID_HANDLE_VALUE) {
155         g_hStubLogFile = NULL;
156     }
157 }
158
159 #define OUTPUT_FORMAT_INT "\t%-30s %d\r\n"
160 #define OUTPUT_FORMAT_SIZE "\t%-30s %zu\r\n"
161 #define OUTPUT_FORMAT_PCT "\t%-30s %#5.2f%%\r\n"
162 #define OUTPUT_FORMAT_INT_PCT "\t%-30s %5d (%#5.2f%%)\r\n"
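// Example of the line produced by one of these formats (illustrative values only):
//
//     sprintf_s(buf, ARRAY_SIZE(buf), OUTPUT_FORMAT_INT, "site_counter", g_site_counter);
//     // -> "\tsite_counter                   1234\r\n"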
163
164 void VirtualCallStubManager::LoggingDump()
165 {
166     CONTRACTL
167     {
168         NOTHROW;
169         GC_TRIGGERS;
170         FORBID_FAULT;
171     }
172     CONTRACTL_END
173
174     VirtualCallStubManagerIterator it =
175         VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
176
177     while (it.Next())
178     {
179         it.Current()->LogStats();
180     }
181
182     g_resolveCache->LogStats();
183
184     // Temp space to use for formatting the output.
185     static const int FMT_STR_SIZE = 160;
186     char szPrintStr[FMT_STR_SIZE];
187     DWORD dwWriteByte;
188
189     if(g_hStubLogFile)
190     {
191 #ifdef STUB_LOGGING
192         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub tuning parameters\r\n");
193         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
194
195         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d  (0x%02x)\r\n", "STUB_MISS_COUNT_VALUE",
196                 STUB_MISS_COUNT_VALUE, STUB_MISS_COUNT_VALUE);
197         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
198         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_WRITE_PCT",
199                 STUB_COLLIDE_WRITE_PCT, STUB_COLLIDE_WRITE_PCT);
200         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
201         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_MONO_PCT",
202                 STUB_COLLIDE_MONO_PCT, STUB_COLLIDE_MONO_PCT);
203         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
204         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "DumpLogCounter",
205                 g_dumpLogCounter, g_dumpLogCounter);
206         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
207         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "DumpLogIncr",
208                 g_dumpLogIncr, g_dumpLogIncr);
209         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
210         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "ResetCacheCounter",
211                 g_resetCacheCounter, g_resetCacheCounter);
212         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
213         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "ResetCacheIncr",
214                 g_resetCacheIncr, g_resetCacheIncr);
215         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
216 #endif // STUB_LOGGING
217
218         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nsite data\r\n");
219         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
220
221         //output counters
222         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", g_site_counter);
223         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
224         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write", g_site_write);
225         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
226         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", g_site_write_mono);
227         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
228         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", g_site_write_poly);
229         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
230
231         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %d\r\n", "reclaim_counter", g_reclaim_counter);
232         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
233
234         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub data\r\n");
235         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
236
237         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", g_stub_lookup_counter);
238         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
239         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", g_stub_mono_counter);
240         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
241         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", g_stub_poly_counter);
242         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
243         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_vtable_counter", g_stub_vtable_counter);
244         WriteFile(g_hStubLogFile, szPrintStr, (DWORD)strlen(szPrintStr), &dwWriteByte, NULL);
245         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", g_stub_space);
246         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
247
248 #ifdef STUB_LOGGING
249
250         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nlookup stub data\r\n");
251         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
252
253         UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
254
255         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "lookup_call_counter", g_call_lookup_counter);
256         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
257
258         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %d\r\n", "total stub dispatch calls", total_calls);
259         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
260
261         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "mono stub data",
262                 100.0 * double(g_mono_call_counter)/double(total_calls));
263         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
264
265         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "mono_call_counter", g_mono_call_counter);
266         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
267         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "mono_miss_counter", g_mono_miss_counter);
268         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
269         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
270                 100.0 * double(g_mono_miss_counter)/double(g_mono_call_counter));
271         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
272
273         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "poly stub data",
274                 100.0 * double(g_poly_call_counter)/double(total_calls));
275         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
276
277         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "poly_call_counter", g_poly_call_counter);
278         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
279         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "poly_miss_counter", g_poly_miss_counter);
280         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
281         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
282                 100.0 * double(g_poly_miss_counter)/double(g_poly_call_counter));
283         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
284 #endif // STUB_LOGGING
285
286 #ifdef CHAIN_LOOKUP
287         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nchain lookup data\r\n");
288         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
289
290 #ifdef STUB_LOGGING
291         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_call_counter", g_chained_lookup_call_counter);
292         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
293         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_miss_counter", g_chained_lookup_miss_counter);
294         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
295         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
296                 100.0 * double(g_chained_lookup_miss_counter)/double(g_chained_lookup_call_counter));
297         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
298
299         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_call_counter", g_chained_lookup_external_call_counter);
300         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
301         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_miss_counter", g_chained_lookup_external_miss_counter);
302         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
303         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
304                 100.0 * double(g_chained_lookup_external_miss_counter)/double(g_chained_lookup_external_call_counter));
305         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
306 #endif // STUB_LOGGING
307         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "chained_entry_promoted", g_chained_entry_promoted);
308         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
309 #endif // CHAIN_LOOKUP
310
311 #ifdef STUB_LOGGING
312         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "worker (slow resolver) data",
313                 100.0 * double(g_worker_call)/double(total_calls));
314 #else // !STUB_LOGGING
315         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nworker (slow resolver) data\r\n");
316 #endif // !STUB_LOGGING
317                 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
318
319         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "worker_call", g_worker_call);
320         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
321         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "worker_call_no_patch", g_worker_call_no_patch);
322         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
323         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "external_call", g_external_call);
324         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
325         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "external_call_no_patch", g_external_call_no_patch);
326         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
327         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "worker_collide_to_mono", g_worker_collide_to_mono);
328         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
329
330         UINT32 total_inserts = g_insert_cache_external
331                              + g_insert_cache_shared
332                              + g_insert_cache_dispatch
333                              + g_insert_cache_resolve;
334
335         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n%-30s %d\r\n", "insert cache data", total_inserts);
336         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
337
338         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_external", g_insert_cache_external,
339                 100.0 * double(g_insert_cache_external)/double(total_inserts));
340         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
341         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_shared", g_insert_cache_shared,
342                 100.0 * double(g_insert_cache_shared)/double(total_inserts));
343         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
344         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_dispatch", g_insert_cache_dispatch,
345                 100.0 * double(g_insert_cache_dispatch)/double(total_inserts));
346         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
347         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_resolve", g_insert_cache_resolve,
348                 100.0 * double(g_insert_cache_resolve)/double(total_inserts));
349         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
350         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_hit", g_insert_cache_hit,
351                 100.0 * double(g_insert_cache_hit)/double(total_inserts));
352         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
353         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_miss", g_insert_cache_miss,
354                 100.0 * double(g_insert_cache_miss)/double(total_inserts));
355         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
356         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_collide", g_insert_cache_collide,
357                 100.0 * double(g_insert_cache_collide)/double(total_inserts));
358         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
359         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_write", g_insert_cache_write,
360                 100.0 * double(g_insert_cache_write)/double(total_inserts));
361         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
362
363         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache data\r\n");
364         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
365
366         size_t total, used;
367         g_resolveCache->GetLoadFactor(&total, &used);
368
369         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_SIZE, "cache_entry_used", used);
370         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
371         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", g_cache_entry_counter);
372         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
373         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", g_cache_entry_space);
374         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
375
376         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub hash table data\r\n");
377         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
378
379         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space", g_bucket_space);
380         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
381         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space_dead", g_bucket_space_dead);
382         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
383
384         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache_load:\t%zu used, %zu total, utilization %#5.2f%%\r\n",
385                 used, total, 100.0 * double(used) / double(total));
386         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
387
388 #ifdef STUB_LOGGING
389         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache entry write counts\r\n");
390         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
391         DispatchCache::CacheEntryData *rgCacheData = g_resolveCache->cacheData;
392         for (UINT16 i = 0; i < CALL_STUB_CACHE_SIZE; i++)
393         {
394             sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), " %4d", rgCacheData[i]);
395             WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
396             if (i % 16 == 15)
397             {
398                 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n");
399                 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
400             }
401         }
402         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\n");
403         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
404 #endif // STUB_LOGGING
405
406 #if 0
407         for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
408         {
409             if (ContractImplMap::deltasDescs[i] != 0)
410             {
411                 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "deltasDescs[%d]\t%d\r\n", i, ContractImplMap::deltasDescs[i]);
412                 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
413             }
414         }
415         for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
416         {
417             if (ContractImplMap::deltasSlots[i] != 0)
418             {
419                 sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "deltasSlots[%d]\t%d\r\n", i, ContractImplMap::deltasSlots[i]);
420                 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
421             }
422         }
423         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "count of maps:\t%d\r\n", ContractImplMap::countMaps);
424         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
425         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "count of interfaces:\t%d\r\n", ContractImplMap::countInterfaces);
426         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
427         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "count of deltas:\t%d\r\n", ContractImplMap::countDelta);
428         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
429         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "total delta for descs:\t%d\r\n", ContractImplMap::totalDeltaDescs);
430         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
431         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "total delta for slots:\t%d\r\n", ContractImplMap::totalDeltaSlots);
432         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
433
434 #endif // 0
435     }
436 }
437
438 void VirtualCallStubManager::FinishLogging()
439 {
440     LoggingDump();
441
442     if(g_hStubLogFile)
443     {
444         CloseHandle(g_hStubLogFile);
445     }
446     g_hStubLogFile = NULL;
447 }
448
449 void VirtualCallStubManager::ResetCache()
450 {
451     CONTRACTL
452     {
453         NOTHROW;
454         GC_TRIGGERS;
455         FORBID_FAULT;
456     }
457     CONTRACTL_END
458
459     g_resolveCache->LogStats();
460
461     g_insert_cache_external = 0;
462     g_insert_cache_shared = 0;
463     g_insert_cache_dispatch = 0;
464     g_insert_cache_resolve = 0;
465     g_insert_cache_hit = 0;
466     g_insert_cache_miss = 0;
467     g_insert_cache_collide = 0;
468     g_insert_cache_write = 0;
469
470     // Go through the global resolve cache and unlink every entry, leaving the
471     // whole cache empty. (Unlike the destructor below, this is a full reset and
472     // is not limited to entries owned by a particular manager.)
473     DispatchCache::Iterator it(g_resolveCache);
474     while (it.IsValid())
475     {
476         it.UnlinkEntry();
477     }
478
479 }
480
481 void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderAllocator)
482 {
483     CONTRACTL {
484         THROWS;
485         GC_TRIGGERS;
486         PRECONDITION(CheckPointer(pDomain));
487         INJECT_FAULT(COMPlusThrowOM(););
488     } CONTRACTL_END;
489
490     // Record the parent domain
491     parentDomain        = pDomain;
492     m_loaderAllocator   = pLoaderAllocator;
493
494     //
495     // Init critical sections
496     //
497
498     m_indCellLock.Init(CrstVSDIndirectionCellLock, CRST_UNSAFE_ANYMODE);
499
500     //
501     // Now allocate all BucketTables
502     //
503
504     NewHolder<BucketTable> resolvers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
505     NewHolder<BucketTable> dispatchers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS*2));
506     NewHolder<BucketTable> lookups_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
507     NewHolder<BucketTable> vtableCallers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
508     NewHolder<BucketTable> cache_entries_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
509
510     //
511     // Now allocate our LoaderHeaps
512     //
513
514     //
515     // First do some calculation to determine how many pages that we
516     // will need to commit and reserve for each of our loader heaps
517     //
518     DWORD indcell_heap_reserve_size;
519     DWORD indcell_heap_commit_size;
520     DWORD cache_entry_heap_reserve_size;
521     DWORD cache_entry_heap_commit_size;
522
523     //
524     // Setup an expected number of items to commit and reserve
525     //
526     // The commit number is not that important, as we always commit at least one page's worth of items.
527     // The reserve number should be high enough to cover a typical large application,
528     // in order to minimize the fragmentation of our rangelists.
529     //
530
531     indcell_heap_commit_size     = 16;        indcell_heap_reserve_size      = 2000;
532     cache_entry_heap_commit_size = 16;        cache_entry_heap_reserve_size  =  800;
533
534     //
535     // Convert the number of items into a size in bytes to commit and reserve
536     //
537     indcell_heap_reserve_size       *= sizeof(void *);
538     indcell_heap_commit_size        *= sizeof(void *);
539
540     cache_entry_heap_reserve_size   *= sizeof(ResolveCacheElem);
541     cache_entry_heap_commit_size    *= sizeof(ResolveCacheElem);
542
543     //
544     // Align up all of the commit and reserve sizes
545     //
546     indcell_heap_reserve_size        = (DWORD) ALIGN_UP(indcell_heap_reserve_size,     GetOsPageSize());
547     indcell_heap_commit_size         = (DWORD) ALIGN_UP(indcell_heap_commit_size,      GetOsPageSize());
548
549     cache_entry_heap_reserve_size    = (DWORD) ALIGN_UP(cache_entry_heap_reserve_size, GetOsPageSize());
550     cache_entry_heap_commit_size     = (DWORD) ALIGN_UP(cache_entry_heap_commit_size,  GetOsPageSize());
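    // Worked example of the sizing above (illustrative; assumes 64-bit pointers and 4 KB OS pages):
    //   indcell_heap_reserve_size     = 2000 * 8 = 16000 bytes -> ALIGN_UP -> 16384 bytes (4 pages)
    //   indcell_heap_commit_size      =   16 * 8 =   128 bytes -> ALIGN_UP ->  4096 bytes (1 page)
    //   cache_entry_heap_reserve_size =  800 * sizeof(ResolveCacheElem), rounded up to whole pages
    //   cache_entry_heap_commit_size  =   16 * sizeof(ResolveCacheElem), rounded up to one page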
551
552     BYTE * initReservedMem = NULL;
553
554     if (!m_loaderAllocator->IsCollectible())
555     {
556         DWORD dwTotalReserveMemSizeCalc  = indcell_heap_reserve_size     +
557                                            cache_entry_heap_reserve_size;
558
559         DWORD dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSizeCalc, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
560
561         // If there's wasted reserved memory, we hand this out to the heaps to avoid waste.
562         {
563             DWORD dwWastedReserveMemSize = dwTotalReserveMemSize - dwTotalReserveMemSizeCalc;
564             if (dwWastedReserveMemSize != 0)
565             {
566                 DWORD cWastedPages = dwWastedReserveMemSize / GetOsPageSize();
567
568                 // Split the wasted pages over the 2 LoaderHeaps that we allocate as part of a VirtualCallStubManager
569                 DWORD cPagesPerHeap = cWastedPages / 2;
570                 DWORD cPagesRemainder = cWastedPages % 2; // We'll throw this at the cache entry heap
571
572                 indcell_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
573                 cache_entry_heap_reserve_size += (cPagesPerHeap + cPagesRemainder) * GetOsPageSize();
574             }
575
576             CONSISTENCY_CHECK((indcell_heap_reserve_size     +
577                                cache_entry_heap_reserve_size)==
578                               dwTotalReserveMemSize);
579         }
580
581         initReservedMem = (BYTE*)ExecutableAllocator::Instance()->Reserve(dwTotalReserveMemSize);
582
583         m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
584
585         if (initReservedMem == NULL)
586             COMPlusThrowOM();
587     }
588     else
589     {
590         indcell_heap_reserve_size        = GetOsPageSize();
591         indcell_heap_commit_size         = GetOsPageSize();
592
593         cache_entry_heap_reserve_size    = GetOsPageSize();
594         cache_entry_heap_commit_size     = GetOsPageSize();
595
596 #ifdef _DEBUG
597         DWORD dwTotalReserveMemSizeCalc  = indcell_heap_reserve_size     +
598                                            cache_entry_heap_reserve_size;
599 #endif
600
601         DWORD dwActualVSDSize = 0;
602
603         initReservedMem = pLoaderAllocator->GetVSDHeapInitialBlock(&dwActualVSDSize);
604         _ASSERTE(dwActualVSDSize == dwTotalReserveMemSizeCalc);
605
606         m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
607
608         if (initReservedMem == NULL)
609             COMPlusThrowOM();
610     }
611
612     // Hot  memory, Writable, No-Execute, infrequent writes
613     NewHolder<LoaderHeap> indcell_heap_holder(
614                                new LoaderHeap(indcell_heap_reserve_size, indcell_heap_commit_size,
615                                               initReservedMem, indcell_heap_reserve_size,
616                                               NULL, UnlockedLoaderHeap::HeapKind::Data));
617
618     initReservedMem += indcell_heap_reserve_size;
619
620     // Hot  memory, Writable, No-Execute, infrequent writes
621     NewHolder<LoaderHeap> cache_entry_heap_holder(
622                                new LoaderHeap(cache_entry_heap_reserve_size, cache_entry_heap_commit_size,
623                                               initReservedMem, cache_entry_heap_reserve_size,
624                                               &cache_entry_rangeList, UnlockedLoaderHeap::HeapKind::Data));
625
626     initReservedMem += cache_entry_heap_reserve_size;
627
628     // Warm memory, Writable, Execute, write exactly once
629     NewHolder<CodeFragmentHeap> lookup_heap_holder(
630                                new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_LOOKUP_STUB));
631
632     // Hot  memory, Writable, Execute, write exactly once
633     NewHolder<CodeFragmentHeap> dispatch_heap_holder(
634                                new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_DISPATCH_STUB));
635
636     // Hot  memory, Writable, Execute, write exactly once
637     NewHolder<CodeFragmentHeap> resolve_heap_holder(
638                                new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_RESOLVE_STUB));
639
640     // Hot  memory, Writable, Execute, write exactly once
641     NewHolder<CodeFragmentHeap> vtable_heap_holder(
642                                new CodeFragmentHeap(pLoaderAllocator, STUB_CODE_BLOCK_VSD_VTABLE_STUB));
643
644     // Allocate the initial counter block
645     NewHolder<counter_block> m_counters_holder(new counter_block);
646
647     //
648     // On success of every allocation, assign the objects and suppress the release
649     //
650
651     indcell_heap     = indcell_heap_holder;     indcell_heap_holder.SuppressRelease();
652     lookup_heap      = lookup_heap_holder;      lookup_heap_holder.SuppressRelease();
653     dispatch_heap    = dispatch_heap_holder;    dispatch_heap_holder.SuppressRelease();
654     resolve_heap     = resolve_heap_holder;     resolve_heap_holder.SuppressRelease();
655     vtable_heap      = vtable_heap_holder;      vtable_heap_holder.SuppressRelease();
656     cache_entry_heap = cache_entry_heap_holder; cache_entry_heap_holder.SuppressRelease();
657
658     resolvers        = resolvers_holder;        resolvers_holder.SuppressRelease();
659     dispatchers      = dispatchers_holder;      dispatchers_holder.SuppressRelease();
660     lookups          = lookups_holder;          lookups_holder.SuppressRelease();
661     vtableCallers    = vtableCallers_holder;    vtableCallers_holder.SuppressRelease();
662     cache_entries    = cache_entries_holder;    cache_entries_holder.SuppressRelease();
663
664     m_counters       = m_counters_holder;       m_counters_holder.SuppressRelease();
665
666     // Create the initial failure counter block
667     m_counters->next = NULL;
668     m_counters->used = 0;
669     m_cur_counter_block = m_counters;
670
671     m_cur_counter_block_for_reclaim = m_counters;
672     m_cur_counter_block_for_reclaim_index = 0;
673
674     // Keep track of all of our managers
675     VirtualCallStubManagerManager::GlobalManager()->AddStubManager(this);
676 }
677
678 void VirtualCallStubManager::Uninit()
679 {
680     WRAPPER_NO_CONTRACT;
681
682     // Keep track of all our managers
683     VirtualCallStubManagerManager::GlobalManager()->RemoveStubManager(this);
684 }
685
686 VirtualCallStubManager::~VirtualCallStubManager()
687 {
688     CONTRACTL {
689         NOTHROW;
690         GC_NOTRIGGER;
691         FORBID_FAULT;
692     } CONTRACTL_END;
693
694     LogStats();
695
696     // Go through each cache entry and if the cache element there is in
697     // the cache entry heap of the manager being deleted, then we just
698     // set the cache entry to empty.
699     DispatchCache::Iterator it(g_resolveCache);
700     while (it.IsValid())
701     {
702         // Using UnlinkEntry performs an implicit call to Next (see comment for UnlinkEntry).
703         // Thus, we need to avoid calling Next when we delete an entry so
704         // that we don't accidentally skip entries.
705         while (it.IsValid() && cache_entry_rangeList.IsInRange((TADDR)it.Entry()))
706         {
707             it.UnlinkEntry();
708         }
709         it.Next();
710     }
711
712     if (indcell_heap)     { delete indcell_heap;     indcell_heap     = NULL;}
713     if (lookup_heap)      { delete lookup_heap;      lookup_heap      = NULL;}
714     if (dispatch_heap)    { delete dispatch_heap;    dispatch_heap    = NULL;}
715     if (resolve_heap)     { delete resolve_heap;     resolve_heap     = NULL;}
716     if (vtable_heap)      { delete vtable_heap;      vtable_heap      = NULL;}
717     if (cache_entry_heap) { delete cache_entry_heap; cache_entry_heap = NULL;}
718
719     if (resolvers)        { delete resolvers;        resolvers        = NULL;}
720     if (dispatchers)      { delete dispatchers;      dispatchers      = NULL;}
721     if (lookups)          { delete lookups;          lookups          = NULL;}
722     if (vtableCallers)    { delete vtableCallers;    vtableCallers    = NULL;}
723     if (cache_entries)    { delete cache_entries;    cache_entries    = NULL;}
724
725     // Now get rid of the memory taken by the counter_blocks
726     while (m_counters != NULL)
727     {
728         counter_block *del = m_counters;
729         m_counters = m_counters->next;
730         delete del;
731     }
732
733     // This was the block reserved by Init for the heaps.
734     // For the collectible case, the VSD logic does not allocate the memory.
735     if (m_initialReservedMemForHeaps && !m_loaderAllocator->IsCollectible())
736         ClrVirtualFree (m_initialReservedMemForHeaps, 0, MEM_RELEASE);
737
738     // Free critical section
739     m_indCellLock.Destroy();
740 }
741
742 // Initialize static structures, and start up logging if necessary
743 void VirtualCallStubManager::InitStatic()
744 {
745     STANDARD_VM_CONTRACT;
746
747 #ifdef STUB_LOGGING
748     // Note if you change these values using environment variables then you must use hex values :-(
749     STUB_MISS_COUNT_VALUE  = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubMissCount);
750     STUB_COLLIDE_WRITE_PCT = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideWritePct);
751     STUB_COLLIDE_MONO_PCT  = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideMonoPct);
752     g_dumpLogCounter       = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogCounter);
753     g_dumpLogIncr          = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogIncr);
754     g_resetCacheCounter    = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheCounter);
755     g_resetCacheIncr       = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheIncr);
756 #endif // STUB_LOGGING
757
758 #ifndef STUB_DISPATCH_PORTABLE
759     DispatchHolder::InitializeStatic();
760     ResolveHolder::InitializeStatic();
761 #endif // !STUB_DISPATCH_PORTABLE
762     LookupHolder::InitializeStatic();
763
764     g_resolveCache = new DispatchCache();
765
766     if(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_VirtualCallStubLogging))
767         StartupLogging();
768
769     VirtualCallStubManagerManager::InitStatic();
770 }
771
772 // Static shutdown code.
773 // At the moment, this doesn't do anything more than log statistics.
774 void VirtualCallStubManager::UninitStatic()
775 {
776     CONTRACTL
777     {
778         NOTHROW;
779         GC_TRIGGERS;
780         FORBID_FAULT;
781     }
782     CONTRACTL_END
783
784     if (g_hStubLogFile != NULL)
785     {
786         VirtualCallStubManagerIterator it =
787             VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
788         while (it.Next())
789         {
790             it.Current()->LogStats();
791         }
792
793         g_resolveCache->LogStats();
794
795         FinishLogging();
796     }
797 }
798
799 /* reclaim/rearrange any structures that can only be done during a gc sync point
800 i.e. need to be serialized and non-concurrent. */
801 void VirtualCallStubManager::ReclaimAll()
802 {
803     STATIC_CONTRACT_NOTHROW;
804     STATIC_CONTRACT_GC_NOTRIGGER;
805     STATIC_CONTRACT_FORBID_FAULT;
806
807     /* @todo: if/when app domain unloading is supported,
808     and when we have app domain specific stub heaps, we can complete the unloading
809     of an app domain stub heap at this point, and make any patches to existing stubs that are
810 not being unloaded so that they no longer refer to any of the unloaded app domain's code or types
811     */
812
813     //reclaim space of abandoned buckets
814     BucketTable::Reclaim();
815
816     VirtualCallStubManagerIterator it =
817         VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
818     while (it.Next())
819     {
820         it.Current()->Reclaim();
821     }
822
823     g_reclaim_counter++;
824 }
825
826 /* reclaim/rearrange any structures that can only be done during a gc sync point
827 i.e. need to be serialized and non-concurrent. */
828 void VirtualCallStubManager::Reclaim()
829 {
830     LIMITED_METHOD_CONTRACT;
831
832     UINT32 limit = min(counter_block::MAX_COUNTER_ENTRIES,
833                                   m_cur_counter_block_for_reclaim->used);
834     limit = min(m_cur_counter_block_for_reclaim_index + 16,  limit);
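    // Each pass touches at most 16 counters and adds (STUB_MISS_COUNT_VALUE/10)+1 to
    // each one -- e.g. 11 with the STUB_LOGGING default of 100 -- so the work done at
    // any single GC sync point stays small.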
835
836     for (UINT32 i = m_cur_counter_block_for_reclaim_index; i < limit; i++)
837     {
838         m_cur_counter_block_for_reclaim->block[i] += (STUB_MISS_COUNT_VALUE/10)+1;
839     }
840
841     // Increment the index by the number we processed
842     m_cur_counter_block_for_reclaim_index = limit;
843
844     // If we ran to the end of the block, go to the next
845     if (m_cur_counter_block_for_reclaim_index == m_cur_counter_block->used)
846     {
847         m_cur_counter_block_for_reclaim = m_cur_counter_block_for_reclaim->next;
848         m_cur_counter_block_for_reclaim_index = 0;
849
850         // If this was the last block in the chain, go back to the beginning
851         if (m_cur_counter_block_for_reclaim == NULL)
852             m_cur_counter_block_for_reclaim = m_counters;
853     }
854 }
855
856 #endif // !DACCESS_COMPILE
857
858 //----------------------------------------------------------------------------
859 /* static */
860 VirtualCallStubManager *VirtualCallStubManager::FindStubManager(PCODE stubAddress,  StubCodeBlockKind* wbStubKind)
861 {
862     CONTRACTL {
863         NOTHROW;
864         GC_NOTRIGGER;
865         FORBID_FAULT;
866         SUPPORTS_DAC;
867     } CONTRACTL_END
868
869     StubCodeBlockKind unusedStubKind;
870     if (wbStubKind == NULL)
871     {
872         wbStubKind = &unusedStubKind;
873     }
874
875     *wbStubKind = STUB_CODE_BLOCK_UNKNOWN;
876
877     RangeSection * pRS = ExecutionManager::FindCodeRange(stubAddress, ExecutionManager::ScanReaderLock);
878     if (pRS == NULL)
879         return NULL;
880
881     StubCodeBlockKind kind = pRS->_pjit->GetStubCodeBlockKind(pRS, stubAddress);
882     switch (kind)
883     {
884     case STUB_CODE_BLOCK_VSD_DISPATCH_STUB:
885     case STUB_CODE_BLOCK_VSD_RESOLVE_STUB:
886     case STUB_CODE_BLOCK_VSD_LOOKUP_STUB:
887     case STUB_CODE_BLOCK_VSD_VTABLE_STUB:
888         // This is a VSD stub; use the RangeSection to identify which LoaderAllocator it comes from
889         _ASSERTE(pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP);
890         *wbStubKind = kind;
891         return pRS->_pHeapList->pLoaderAllocator->GetVirtualCallStubManager();
892
893     default:
894         return NULL;
895     }
896 }
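// Typical use of FindStubManager, as in GetTokenFromStub below (sketch only; 'addr' is a placeholder):
#if 0
    StubCodeBlockKind kind;
    VirtualCallStubManager * pMgr = VirtualCallStubManager::FindStubManager(addr, &kind);
    if (pMgr != NULL)
    {
        // addr is one of the four VSD stub kinds; 'kind' says which one.
    }
#endif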
897
898 /* for use by debugger.
899 */
900 BOOL VirtualCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
901 {
902     STATIC_CONTRACT_NOTHROW;
903     STATIC_CONTRACT_GC_NOTRIGGER;
904     STATIC_CONTRACT_FORBID_FAULT;
905     SUPPORTS_DAC;
906
907     // Forwarded to from RangeSectionStubManager
908     return FALSE;
909 }
910
911 /* for use by debugger.
912 */
913
914 extern "C" void STDCALL StubDispatchFixupPatchLabel();
915
916 BOOL VirtualCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
917 {
918     LIMITED_METHOD_CONTRACT;
919
920     LOG((LF_CORDB, LL_EVERYTHING, "VirtualCallStubManager::DoTraceStub called\n"));
921
922     // @workaround: Well, we really need the context to figure out where we're going, so
923     // we'll do a TRACE_MGR_PUSH so that TraceManager gets called and we can use
924     // the provided context to figure out where we're going.
925     trace->InitForManagerPush(stubStartAddress, this);
926     return TRUE;
927 }
928
929 //----------------------------------------------------------------------------
930 BOOL VirtualCallStubManager::TraceManager(Thread *thread,
931                               TraceDestination *trace,
932                               T_CONTEXT *pContext,
933                               BYTE **pRetAddr)
934 {
935     CONTRACTL
936     {
937         THROWS;
938         GC_TRIGGERS;
939         INJECT_FAULT(COMPlusThrowOM(););
940     }
941     CONTRACTL_END
942
943     TADDR pStub = GetIP(pContext);
944
945     // The return address should be on the top of the stack
946     *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
947
948     // Get the token from the stub
949     DispatchToken token(GetTokenFromStub(pStub));
950
952     // Get the 'this' object from the context (ECX on x86)
952     Object *pObj = StubManagerHelpers::GetThisPtr(pContext);
953
954     // Call common trace code.
955     return (TraceResolver(pObj, token, trace));
956 }
957
958 #ifndef DACCESS_COMPILE
959
960 PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, MethodDesc *pMD)
961 {
962     CONTRACTL {
963         THROWS;
964         GC_TRIGGERS;
965         MODE_ANY;
966         PRECONDITION(CheckPointer(pMD));
967         PRECONDITION(!pMD->IsInterface() || ownerType.GetMethodTable()->HasSameTypeDefAs(pMD->GetMethodTable()));
968         INJECT_FAULT(COMPlusThrowOM(););
969     } CONTRACTL_END;
970
971     return GetCallStub(ownerType, pMD->GetSlot());
972 }
973
974 //find or create a stub
975 PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, DWORD slot)
976 {
977     CONTRACT (PCODE) {
978         THROWS;
979         GC_TRIGGERS;
980         MODE_ANY;
981         INJECT_FAULT(COMPlusThrowOM(););
982         POSTCONDITION(RETVAL != NULL);
983     } CONTRACT_END;
984
985     GCX_COOP(); // This is necessary for BucketTable synchronization
986
987     MethodTable * pMT = ownerType.GetMethodTable();
988
989     DispatchToken token;
990     if (pMT->IsInterface())
991         token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
992     else
993         token = DispatchToken::CreateDispatchToken(slot);
994
995     //get a stub from lookups, make one if necessary
996     PCODE stub = CALL_STUB_EMPTY_ENTRY;
997     PCODE addrOfResolver = GetEEFuncEntryPoint(ResolveWorkerAsmStub);
998
999     LookupEntry entryL;
1000     Prober probeL(&entryL);
1001     if (lookups->SetUpProber(token.To_SIZE_T(), 0, &probeL))
1002     {
1003         if ((stub = (PCODE)(lookups->Find(&probeL))) == CALL_STUB_EMPTY_ENTRY)
1004         {
1005             LookupHolder *pLookupHolder = GenerateLookupStub(addrOfResolver, token.To_SIZE_T());
1006             stub = (PCODE) (lookups->Add((size_t)(pLookupHolder->stub()->entryPoint()), &probeL));
1007         }
1008     }
1009
1010     _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
1011     stats.site_counter++;
1012
1013     RETURN (stub);
1014 }
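// The lookup above shows the find-or-create probing pattern that the other BucketTables
// allocated in Init (vtableCallers, cache_entries, dispatchers, resolvers) also follow.
// In sketch form (illustrative only; 'keyA', 'keyB', and 'newValue' are placeholders):
#if 0
    Entry entry;
    Prober probe(&entry);
    if (table->SetUpProber(keyA, keyB, &probe))       // hash the keys and pick a bucket
    {
        value = table->Find(&probe);                  // probe for an existing entry
        if (value == CALL_STUB_EMPTY_ENTRY)
            value = table->Add(newValue, &probe);     // add it; the value kept in the table is returned
    }
#endif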
1015
1016 PCODE VirtualCallStubManager::GetVTableCallStub(DWORD slot)
1017 {
1018     CONTRACT(PCODE) {
1019         THROWS;
1020         GC_TRIGGERS;
1021         MODE_ANY;
1022         INJECT_FAULT(COMPlusThrowOM(););
1023         POSTCONDITION(RETVAL != NULL);
1024     } CONTRACT_END;
1025
1026     GCX_COOP(); // This is necessary for BucketTable synchronization
1027
1028     PCODE stub = CALL_STUB_EMPTY_ENTRY;
1029
1030     VTableCallEntry entry;
1031     Prober probe(&entry);
1032     if (vtableCallers->SetUpProber(DispatchToken::CreateDispatchToken(slot).To_SIZE_T(), 0, &probe))
1033     {
1034         if ((stub = (PCODE)(vtableCallers->Find(&probe))) == CALL_STUB_EMPTY_ENTRY)
1035         {
1036             VTableCallHolder *pHolder = GenerateVTableCallStub(slot);
1037             stub = (PCODE)(vtableCallers->Add((size_t)(pHolder->stub()->entryPoint()), &probe));
1038         }
1039     }
1040
1041     _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
1042     RETURN(stub);
1043 }
1044
1045 VTableCallHolder* VirtualCallStubManager::GenerateVTableCallStub(DWORD slot)
1046 {
1047     CONTRACT(VTableCallHolder*) {
1048         THROWS;
1049         GC_TRIGGERS;
1050         MODE_ANY;
1051         INJECT_FAULT(COMPlusThrowOM(););
1052         POSTCONDITION(RETVAL != NULL);
1053     } CONTRACT_END;
1054
1055     //allocate from the requisite heap and copy the template over it.
1056     size_t vtableHolderSize = VTableCallHolder::GetHolderSize(slot);
1057     VTableCallHolder * pHolder = (VTableCallHolder*)(void*)vtable_heap->AllocAlignedMem(vtableHolderSize, CODE_SIZE_ALIGN);
1058     ExecutableWriterHolder<VTableCallHolder> vtableWriterHolder(pHolder, vtableHolderSize);
1059     vtableWriterHolder.GetRW()->Initialize(slot);
1060
1061     ClrFlushInstructionCache(pHolder->stub(), pHolder->stub()->size());
1062
1063     //incr our counters
1064     stats.stub_vtable_counter++;
1065     stats.stub_space += (UINT32)pHolder->stub()->size();
1066     LOG((LF_STUBS, LL_INFO10000, "GenerateVTableCallStub for slot " FMT_ADDR "at" FMT_ADDR "\n",
1067         DBG_ADDR(slot), DBG_ADDR(pHolder->stub())));
1068
1069 #ifdef FEATURE_PERFMAP
1070     PerfMap::LogStubs(__FUNCTION__, "GenerateVTableCallStub", (PCODE)pHolder->stub(), pHolder->stub()->size());
1071 #endif
1072
1073     RETURN(pHolder);
1074 }
1075
1076 //+----------------------------------------------------------------------------
1077 //
1078 //  Method:     VirtualCallStubManager::GenerateStubIndirection
1079 //
1080 //  Synopsis:   This method allocates an indirection cell for use by the virtual stub dispatch (currently
1081 //              only implemented for interface calls).
1082 //              For normal methods: the indirection cell allocated will never be freed until app domain unload
1083 //              For dynamic methods: we recycle the indirection cells when a dynamic method is collected. To
1084 //              do that we keep all the recycled indirection cells in a linked list: m_RecycledIndCellList. When
1085 //              the dynamic method needs an indirection cell it allocates one from m_RecycledIndCellList. Each
1086 //              dynamic method keeps track of all the indirection cells it uses and adds them back to
1087 //              m_RecycledIndCellList when it is finalized.
1088 //
1089 //+----------------------------------------------------------------------------
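// Rough lifecycle of an indirection cell under the scheme above (illustrative only;
// 'pMgr' and 'stubEntryPoint' are placeholders):
#if 0
    // Dynamic methods ask for a recycled cell first; everything else takes a fresh cell.
    BYTE * pCell = pMgr->GenerateStubIndirection(stubEntryPoint, /* fUseRecycledCell */ TRUE);
    // The dynamic method records pCell; when the method is collected, the cell is pushed
    // back onto m_RecycledIndCellList so a later dynamic method can reuse it.
#endif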
1090 BYTE *VirtualCallStubManager::GenerateStubIndirection(PCODE target, BOOL fUseRecycledCell /* = FALSE*/ )
1091 {
1092     CONTRACT (BYTE*) {
1093         THROWS;
1094         GC_TRIGGERS;
1095         INJECT_FAULT(COMPlusThrowOM(););
1096         PRECONDITION(target != NULL);
1097         POSTCONDITION(CheckPointer(RETVAL));
1098     } CONTRACT_END;
1099
1100     _ASSERTE(isStubStatic(target));
1101
1102     CrstHolder lh(&m_indCellLock);
1103
1104     // The indirection cell to hold the pointer to the stub
1105     BYTE * ret              = NULL;
1106     UINT32 cellsPerBlock    = INDCELLS_PER_BLOCK;
1107
1108     // First try the recycled indirection cell list for Dynamic methods
1109     if (fUseRecycledCell)
1110         ret = GetOneRecycledIndCell();
1111
1112     // Try the free indirection cell list
1113     if (!ret)
1114         ret = GetOneFreeIndCell();
1115
1116     // Allocate from loader heap
1117     if (!ret)
1118     {
1119         // Free list is empty, allocate a block of indcells from indcell_heap and insert it into the free list.
1120         BYTE ** pBlock = (BYTE **) (void *) indcell_heap->AllocMem(S_SIZE_T(cellsPerBlock) * S_SIZE_T(sizeof(BYTE *)));
1121
1122         // return the first cell in the block and add the rest to the free list
1123         ret = (BYTE *)pBlock;
1124
1125         // link all the cells together
1126         // we don't need to null terminate the linked list, InsertIntoFreeIndCellList will do it.
1127         for (UINT32 i = 1; i < cellsPerBlock - 1; ++i)
1128         {
1129             pBlock[i] = (BYTE *)&(pBlock[i+1]);
1130         }
1131
1132         // insert the list into the free indcell list.
1133         InsertIntoFreeIndCellList((BYTE *)&pBlock[1], (BYTE*)&pBlock[cellsPerBlock - 1]);
1134     }
1135
1136     *((PCODE *)ret) = target;
1137     RETURN ret;
1138 }
1139
1140 ResolveCacheElem *VirtualCallStubManager::GetResolveCacheElem(void *pMT,
1141                                                               size_t token,
1142                                                               void *target)
1143 {
1144     CONTRACTL
1145     {
1146         THROWS;
1147         GC_TRIGGERS;
1148         MODE_COOPERATIVE;
1149         INJECT_FAULT(COMPlusThrowOM(););
1150     }
1151     CONTRACTL_END
1152
1153     //get a cache entry elem, or make one if necessary
1154     ResolveCacheElem* elem = NULL;
1155     ResolveCacheEntry entryRC;
1156     Prober probeRC(&entryRC);
1157     if (cache_entries->SetUpProber(token, (size_t) pMT, &probeRC))
1158     {
1159         elem = (ResolveCacheElem*) (cache_entries->Find(&probeRC));
1160         if (elem  == CALL_STUB_EMPTY_ENTRY)
1161         {
1162             bool reenteredCooperativeGCMode = false;
1163             elem = GenerateResolveCacheElem(target, pMT, token, &reenteredCooperativeGCMode);
1164             if (reenteredCooperativeGCMode)
1165             {
1166                 // The prober may have been invalidated by reentering cooperative GC mode, reset it
1167                 BOOL success = cache_entries->SetUpProber(token, (size_t)pMT, &probeRC);
1168                 _ASSERTE(success);
1169             }
1170             elem = (ResolveCacheElem*) (cache_entries->Add((size_t) elem, &probeRC));
1171         }
1172     }
1173     _ASSERTE(elem && (elem != CALL_STUB_EMPTY_ENTRY));
1174     return elem;
1175 }
1176
1177 #endif // !DACCESS_COMPILE
1178
1179 size_t VirtualCallStubManager::GetTokenFromStub(PCODE stub)
1180 {
1181     CONTRACTL
1182     {
1183         NOTHROW;
1184         GC_NOTRIGGER;
1185         FORBID_FAULT;
1186     }
1187     CONTRACTL_END
1188
1189     _ASSERTE(stub != NULL);
1190     StubCodeBlockKind         stubKind = STUB_CODE_BLOCK_UNKNOWN;
1191     VirtualCallStubManager *  pMgr     = FindStubManager(stub, &stubKind);
1192
1193     return GetTokenFromStubQuick(pMgr, stub, stubKind);
1194 }
1195
1196 size_t VirtualCallStubManager::GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubCodeBlockKind kind)
1197 {
1198     CONTRACTL
1199     {
1200         NOTHROW;
1201         GC_NOTRIGGER;
1202         FORBID_FAULT;
1203     }
1204     CONTRACTL_END
1205
1206     _ASSERTE(pMgr != NULL);
1207     _ASSERTE(stub != NULL);
1208     _ASSERTE(kind != STUB_CODE_BLOCK_UNKNOWN);
1209
1210 #ifndef DACCESS_COMPILE
1211
1212     if (kind == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
1213     {
1214         _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_DISPATCH_STUB);
1215         DispatchStub  * dispatchStub  = (DispatchStub *) PCODEToPINSTR(stub);
1216         ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(dispatchStub->failTarget());
1217         _ASSERTE(isResolvingStubStatic(resolveHolder->stub()->resolveEntryPoint()));
1218         return resolveHolder->stub()->token();
1219     }
1220     else if (kind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
1221     {
1222         _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_RESOLVE_STUB);
1223         ResolveHolder * resolveHolder = ResolveHolder::FromResolveEntry(stub);
1224         return resolveHolder->stub()->token();
1225     }
1226     else if (kind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB)
1227     {
1228         _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_LOOKUP_STUB);
1229         LookupHolder  * lookupHolder  = LookupHolder::FromLookupEntry(stub);
1230         return lookupHolder->stub()->token();
1231     }
1232     else if (kind == STUB_CODE_BLOCK_VSD_VTABLE_STUB)
1233     {
1234         _ASSERTE(RangeSectionStubManager::GetStubKind(stub) == STUB_CODE_BLOCK_VSD_VTABLE_STUB);
1235         VTableCallStub * vtableStub = (VTableCallStub *)PCODEToPINSTR(stub);
1236         return vtableStub->token();
1237     }
1238
1239     _ASSERTE(!"Should not get here.");
1240
1241 #else // DACCESS_COMPILE
1242
1243     DacNotImpl();
1244
1245 #endif // DACCESS_COMPILE
1246
1247     return 0;
1248 }
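/* Hedged usage sketch: a diagnostic consumer can walk from a patched call-site
   target back to the dispatch information it encodes.  someCallSite and pMT are
   placeholders; the VirtualCallStubManager calls are the ones defined in this file.

     PCODE  siteTarget = someCallSite.GetSiteTarget();   // someCallSite: hypothetical StubCallSite
     size_t rawToken   = VirtualCallStubManager::GetTokenFromStub(siteTarget);
     DispatchToken token(rawToken);
     // With a candidate MethodTable in hand, the representative method follows:
     // MethodDesc * pMD =
     //     VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, pMT);
*/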
1249
1250 #ifndef DACCESS_COMPILE
1251
1252 #ifdef CHAIN_LOOKUP
1253 ResolveCacheElem* __fastcall VirtualCallStubManager::PromoteChainEntry(ResolveCacheElem *pElem)
1254 {
1255     CONTRACTL {
1256         NOTHROW;
1257         GC_NOTRIGGER;
1258         FORBID_FAULT;
1259         PRECONDITION(CheckPointer(pElem));
1260     } CONTRACTL_END;
1261
1262     g_resolveCache->PromoteChainEntry(pElem);
1263     return pElem;
1264 }
1265 #endif // CHAIN_LOOKUP
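/* The chained cache keeps hot entries cheap to find by moving a hit toward the
   front of its collision chain.  A simplified, hypothetical version of that
   promotion for a singly linked chain (the real DispatchCache also manages
   buckets and counters):

     void PromoteToFrontSketch(ResolveCacheElem ** ppHead, ResolveCacheElem * pHit)
     {
         if (*ppHead == pHit)
             return;                              // already the first entry

         ResolveCacheElem * pPrev = *ppHead;
         while (pPrev->pNext != pHit)             // locate the predecessor
             pPrev = pPrev->pNext;

         pPrev->pNext = pHit->pNext;              // unlink the hit ...
         pHit->pNext  = *ppHead;                  // ... and relink it at the head
         *ppHead      = pHit;
     }
*/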
1266
1267 /* Resolve to a method and return its address or NULL if there is none.
1268    Our return value is the target address that control should continue to.  Our caller will
1269    enter the target address as if a direct call with the original stack frame had been made from
1270    the actual call site.  Hence our strategy is to either return a target address
1271    of the actual method implementation, or the prestub if we cannot find the actual implementation.
1272    If we are returning a real method address, we may patch the original call site to point to a
1273    dispatching stub before returning.  Note, if we encounter a method that hasn't been jitted
1274    yet, we will return the prestub, which should cause it to be jitted and we will
1275    be able to build the dispatching stub on a later call thru the call site.  If we encounter
1276    any other kind of problem, rather than throwing an exception, we will also return the
1277    prestub, unless we are unable to find the method at all, in which case we return NULL.
1278    */
1279 PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
1280                         TADDR siteAddrForRegisterIndirect,
1281                         size_t token
1282 #ifndef TARGET_X86
1283                         , UINT_PTR flags
1284 #endif
1285                         )
1286 {
1287     CONTRACTL {
1288         THROWS;
1289         GC_TRIGGERS;
1290         INJECT_FAULT(COMPlusThrowOM(););
1291         PRECONDITION(CheckPointer(pTransitionBlock));
1292         MODE_COOPERATIVE;
1293     } CONTRACTL_END;
1294
1295     MAKE_CURRENT_THREAD_AVAILABLE();
1296
1297 #ifdef _DEBUG
1298     Thread::ObjectRefFlush(CURRENT_THREAD);
1299 #endif
1300
1301     FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
1302     StubDispatchFrame * pSDFrame = &frame;
1303
1304     PCODE returnAddress = pSDFrame->GetUnadjustedReturnAddress();
1305
1306     StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
1307
1308     OBJECTREF *protectedObj = pSDFrame->GetThisPtr();
1309     _ASSERTE(protectedObj != NULL);
1310     OBJECTREF pObj = *protectedObj;
1311
1312     PCODE target = NULL;
1313
1314     if (pObj == NULL) {
1315         pSDFrame->SetForNullReferenceException();
1316         pSDFrame->Push(CURRENT_THREAD);
1317         INSTALL_MANAGED_EXCEPTION_DISPATCHER;
1318         INSTALL_UNWIND_AND_CONTINUE_HANDLER;
1319         COMPlusThrow(kNullReferenceException);
1320         UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
1321         UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
1322         _ASSERTE(!"Throw returned");
1323     }
1324
1325 #ifndef TARGET_X86
1326     if (flags & SDF_ResolvePromoteChain)
1327     {
1328         ResolveCacheElem * pElem =  (ResolveCacheElem *)token;
1329         g_resolveCache->PromoteChainEntry(pElem);
1330         target = (PCODE) pElem->target;
1331
1332         // Have we failed the dispatch stub too many times?
1333         if (flags & SDF_ResolveBackPatch)
1334         {
1335             PCODE stubAddr = callSite.GetSiteTarget();
1336             VirtualCallStubManager * pMgr = VirtualCallStubManager::FindStubManager(stubAddr);
1337             pMgr->BackPatchWorker(&callSite);
1338         }
1339
1340         return target;
1341     }
1342 #endif
1343
1344     pSDFrame->SetCallSite(NULL, (TADDR)callSite.GetIndirectCell());
1345
1346     DispatchToken representativeToken(token);
1347     MethodTable * pRepresentativeMT = pObj->GetMethodTable();
1348     if (representativeToken.IsTypedToken())
1349     {
1350         pRepresentativeMT = CURRENT_THREAD->GetDomain()->LookupType(representativeToken.GetTypeID());
1351         CONSISTENCY_CHECK(CheckPointer(pRepresentativeMT));
1352     }
1353
1354     pSDFrame->SetRepresentativeSlot(pRepresentativeMT, representativeToken.GetSlotNumber());
1355     pSDFrame->Push(CURRENT_THREAD);
1356     INSTALL_MANAGED_EXCEPTION_DISPATCHER;
1357     INSTALL_UNWIND_AND_CONTINUE_HANDLER;
1358
1359     // For Virtual Delegates the m_siteAddr is a field of a managed object
1360     // Thus we have to report it as an interior pointer,
1361     // so that it is updated during a gc
1362     GCPROTECT_BEGININTERIOR( *(callSite.GetIndirectCellAddress()) );
1363
1364     GCStress<vsd_on_resolve>::MaybeTriggerAndProtect(pObj);
1365
1366     PCODE callSiteTarget = callSite.GetSiteTarget();
1367     CONSISTENCY_CHECK(callSiteTarget != NULL);
1368
1369     StubCodeBlockKind   stubKind = STUB_CODE_BLOCK_UNKNOWN;
1370     VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget, &stubKind);
1371     PREFIX_ASSUME(pMgr != NULL);
1372
1373 #ifndef TARGET_X86
1374     // Have we failed the dispatch stub too many times?
1375     if (flags & SDF_ResolveBackPatch)
1376     {
1377         pMgr->BackPatchWorker(&callSite);
1378     }
1379 #endif
1380
1381     target = pMgr->ResolveWorker(&callSite, protectedObj, representativeToken, stubKind);
1382
1383 #if _DEBUG
1384     if (pSDFrame->GetGCRefMap() != NULL)
1385     {
1386         GCX_PREEMP();
1387         _ASSERTE(CheckGCRefMapEqual(pSDFrame->GetGCRefMap(), pSDFrame->GetFunction(), true));
1388     }
1389 #endif // _DEBUG
1390
1391     GCPROTECT_END();
1392
1393     UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
1394     UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
1395     pSDFrame->Pop(CURRENT_THREAD);
1396
1397     return target;
1398 }
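/* For orientation, a hedged sketch of the architecture-specific thunk that lands
   here; register and stack details vary per target and the pseudo code below is
   not real assembly:

     // ResolveWorkerAsmStub (conceptually):
     //   1. spill the argument registers into a TransitionBlock on the stack
     //   2. PCODE target = VSD_ResolveWorker(&transitionBlock, siteAddr, token, flags);
     //   3. reload the argument registers
     //   4. tail-jump to 'target' so the resolved method sees the original call frame
*/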
1399
1400 void VirtualCallStubManager::BackPatchWorkerStatic(PCODE returnAddress, TADDR siteAddrForRegisterIndirect)
1401 {
1402     CONTRACTL {
1403         NOTHROW;
1404         GC_NOTRIGGER;
1405         FORBID_FAULT;
1406         ENTRY_POINT;
1407         PRECONDITION(returnAddress != NULL);
1408     } CONTRACTL_END
1409
1410     StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
1411
1412     PCODE callSiteTarget = callSite.GetSiteTarget();
1413     CONSISTENCY_CHECK(callSiteTarget != NULL);
1414
1415     VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget);
1416     PREFIX_ASSUME(pMgr != NULL);
1417
1418     pMgr->BackPatchWorker(&callSite);
1419 }
1420
1421 #if defined(TARGET_X86) && defined(TARGET_UNIX)
1422 void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect)
1423 {
1424     VirtualCallStubManager::BackPatchWorkerStatic(returnAddr, siteAddrForRegisterIndirect);
1425 }
1426 #endif
1427
1428 PCODE VirtualCallStubManager::ResolveWorker(StubCallSite* pCallSite,
1429                                             OBJECTREF *protectedObj,
1430                                             DispatchToken token,
1431                                             StubCodeBlockKind stubKind)
1432 {
1433     CONTRACTL {
1434         THROWS;
1435         GC_TRIGGERS;
1436         MODE_COOPERATIVE;
1437         INJECT_FAULT(COMPlusThrowOM(););
1438         PRECONDITION(protectedObj != NULL);
1439         PRECONDITION(*protectedObj != NULL);
1440         PRECONDITION(IsProtectedByGCFrame(protectedObj));
1441     } CONTRACTL_END;
1442
1443     MethodTable* objectType = (*protectedObj)->GetMethodTable();
1444     CONSISTENCY_CHECK(CheckPointer(objectType));
1445
1446 #ifdef STUB_LOGGING
1447     if (g_dumpLogCounter != 0)
1448     {
1449         UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
1450
1451         if (total_calls > g_dumpLogCounter)
1452         {
1453             VirtualCallStubManager::LoggingDump();
1454             if (g_dumpLogIncr == 0)
1455                 g_dumpLogCounter = 0;
1456             else
1457                 g_dumpLogCounter += g_dumpLogIncr;
1458         }
1459     }
1460
1461     if (g_resetCacheCounter != 0)
1462     {
1463         UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
1464
1465         if (total_calls > g_resetCacheCounter)
1466         {
1467             VirtualCallStubManager::ResetCache();
1468             if (g_resetCacheIncr == 0)
1469                 g_resetCacheCounter = 0;
1470             else
1471                 g_resetCacheCounter += g_resetCacheIncr;
1472         }
1473     }
1474 #endif // STUB_LOGGING
1475
1476     //////////////////////////////////////////////////////////////
1477     // Get the managers associated with the callee
1478
1479     VirtualCallStubManager *pCalleeMgr = NULL;  // Only set if the caller is shared, NULL otherwise
1480
1481     BOOL bCallToShorterLivedTarget = FALSE;
1482
1483     // We care about the following cases:
1484     // Call from any site -> collectible target
1485     if (objectType->GetLoaderAllocator()->IsCollectible())
1486     {
1487         // The callee's manager
1488         pCalleeMgr = objectType->GetLoaderAllocator()->GetVirtualCallStubManager();
1489         if (pCalleeMgr != this)
1490         {
1491             bCallToShorterLivedTarget = TRUE;
1492         }
1493         else
1494         {
1495             pCalleeMgr = NULL;
1496         }
1497     }
1498
1499     stats.worker_call++;
1500
1501     LOG((LF_STUBS, LL_INFO100000, "ResolveWorker from %sStub, token" FMT_ADDR "object's MT" FMT_ADDR  "ind-cell" FMT_ADDR "call-site" FMT_ADDR "%s\n",
1502          (stubKind == STUB_CODE_BLOCK_VSD_DISPATCH_STUB) ? "Dispatch" : (stubKind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB) ? "Resolve" : (stubKind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB) ? "Lookup" : "Unknown",
1503          DBG_ADDR(token.To_SIZE_T()), DBG_ADDR(objectType), DBG_ADDR(pCallSite->GetIndirectCell()), DBG_ADDR(pCallSite->GetReturnAddress()),
1504          bCallToShorterLivedTarget ? "bCallToShorterLivedTarget" : "" ));
1505
1506     PCODE stub = CALL_STUB_EMPTY_ENTRY;
1507     PCODE target = NULL;
1508     BOOL patch = FALSE;
1509
1510     // This code can throw an OOM, but we do not want to fail in this case because
1511     // we must always successfully determine the target of a virtual call so that
1512     // CERs can work (there are a couple of exceptions to this involving generics).
1513     // Since the code below is just trying to see if a stub representing the current
1514     // type and token exists, it is not strictly necessary in determining the target.
1515     // We will treat the case of an OOM the same as the case of not finding an entry
1516     // in the hash tables and will continue on to the slow resolve case, which is
1517     // guaranteed not to fail outside of a couple of generics-specific cases.
1518     EX_TRY
1519     {
1520         /////////////////////////////////////////////////////////////////////////////
1521         // First see if we can find a dispatcher stub for this token and type. If a
1522         // match is found, use the target stored in the entry.
1523         {
1524             DispatchEntry entryD;
1525             Prober probeD(&entryD);
1526             if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
1527             {
1528                 stub = (PCODE) dispatchers->Find(&probeD);
1529                 if (stub != CALL_STUB_EMPTY_ENTRY)
1530                 {
1531                     target = (PCODE)entryD.Target();
1532                     patch = TRUE;
1533                 }
1534             }
1535         }
1536
1537         /////////////////////////////////////////////////////////////////////////////////////
1538         // Second see if we can find a ResolveCacheElem for this token and type.
1539         // If a match is found, use the target stored in the entry.
1540         if (target == NULL)
1541         {
1542             ResolveCacheElem * elem = NULL;
1543             ResolveCacheEntry entryRC;
1544             Prober probeRC(&entryRC);
1545             if (cache_entries->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeRC))
1546             {
1547                 elem = (ResolveCacheElem *)(cache_entries->Find(&probeRC));
1548                 if (elem  != CALL_STUB_EMPTY_ENTRY)
1549                 {
1550                     target = (PCODE)entryRC.Target();
1551                     patch  = TRUE;
1552                 }
1553             }
1554         }
1555     }
1556     EX_CATCH
1557     {
1558     }
1559     EX_END_CATCH (SwallowAllExceptions);
1560
1561     /////////////////////////////////////////////////////////////////////////////////////
1562     // If we failed to find a target in either the resolver or cache entry hash tables,
1563     // we need to perform a full resolution of the token and type.
1564     //@TODO: Would be nice to add assertion code to ensure we only ever call Resolver once per <token,type>.
1565     if (target == NULL)
1566     {
1567         CONSISTENCY_CHECK(stub == CALL_STUB_EMPTY_ENTRY);
1568         patch = Resolver(objectType, token, protectedObj, &target, TRUE /* throwOnConflict */);
1569
1570 #if defined(_DEBUG)
1571         if (!objectType->IsComObjectType()
1572             && !objectType->IsICastable()
1573             && !objectType->IsIDynamicInterfaceCastable())
1574         {
1575             CONSISTENCY_CHECK(!MethodTable::GetMethodDescForSlotAddress(target)->IsGenericMethodDefinition());
1576         }
1577 #endif // _DEBUG
1578     }
1579
1580     CONSISTENCY_CHECK(target != NULL);
1581
1582     // Now that we've successfully determined the target, we will wrap the remaining logic in a giant
1583     // TRY/CATCH statement because it is there purely to emit stubs and cache entries. In the event
1584     // that emitting stub or cache entries throws an exception (for example, because of OOM), we should
1585     // not fail to perform the required dispatch. This is all because the basic assumption of
1586     // Constrained Execution Regions (CERs) is that all virtual method calls can be made without
1587     // failure.
1588     //
1589     // NOTE: The THROWS contract for this method does not change, because there are still a few special
1590     // cases involving generics that can throw when trying to determine the target method. These cases
1591     // are exceptional and will be documented as unsupported for CERs.
1592     //
1593     // NOTE: We do not try to keep track of the memory that has been allocated throughout this process
1594     // just so we can revert the memory should things fail. This is because we add the elements to the
1595     // hash tables and can be reused later on. Additionally, the hash tables are unlocked so we could
1596     // never remove the elements anyway.
1597     EX_TRY
1598     {
1599         // If we're the shared domain, we can't burn a dispatch stub to the target
1600         // if that target is outside the shared domain (through virtuals
1601         // originating in the shared domain but overridden by a non-shared type and
1602         // called on a collection, like HashTable would call GetHashCode on an
1603         // arbitrary object in its collection). Dispatch stubs would be hard to clean,
1604         // but resolve stubs are easy to clean because we just clean the cache.
1605         //@TODO: Figure out how to track these indirection cells so that in the
1606         //@TODO: future we can create dispatch stubs for this case.
1607         BOOL bCreateDispatchStub = !bCallToShorterLivedTarget;
1608
1609         DispatchCache::InsertKind insertKind = DispatchCache::IK_NONE;
1610
1611         if (target != NULL)
1612         {
1613             if (patch)
1614             {
1615                 // NOTE: This means that we are sharing dispatch stubs among callsites. If we decide we don't want
1616                 // to do this in the future, just remove this condition
1617                 if (stub == CALL_STUB_EMPTY_ENTRY)
1618                 {
1619                     //we have a target but not the dispatcher stub, let's build it
1620                     //First we need a failure target (the resolver stub)
1621                     ResolveHolder *pResolveHolder = NULL;
1622                     ResolveEntry entryR;
1623                     Prober probeR(&entryR);
1624                     PCODE pBackPatchFcn;
1625                     PCODE pResolverFcn;
1626
1627 #ifdef TARGET_X86
1628                     // Only X86 implementation needs a BackPatch function
1629                     pBackPatchFcn = (PCODE) GetEEFuncEntryPoint(BackPatchWorkerAsmStub);
1630 #else // !TARGET_X86
1631                     pBackPatchFcn = NULL;
1632 #endif // !TARGET_X86
1633
1634 #ifdef CHAIN_LOOKUP
1635                     pResolverFcn  = (PCODE) GetEEFuncEntryPoint(ResolveWorkerChainLookupAsmStub);
1636 #else // CHAIN_LOOKUP
1637                     // Use the slow resolver
1638                     pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerAsmStub);
1639 #endif
1640
1641                     // First see if we've already created a resolve stub for this token
1642                     if (resolvers->SetUpProber(token.To_SIZE_T(), 0, &probeR))
1643                     {
1644                         // Find the right resolver, make it if necessary
1645                         PCODE addrOfResolver = (PCODE)(resolvers->Find(&probeR));
1646                         if (addrOfResolver == CALL_STUB_EMPTY_ENTRY)
1647                         {
1648 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
1649                             MethodDesc* pMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(token, objectType);
1650                             size_t stackArgumentsSize;
1651                             {
1652                                 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
1653                                 stackArgumentsSize = pMD->SizeOfArgStack();
1654                             }
1655 #endif // TARGET_X86 && !UNIX_X86_ABI
1656
1657                             pResolveHolder = GenerateResolveStub(pResolverFcn,
1658                                                              pBackPatchFcn,
1659                                                              token.To_SIZE_T()
1660 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
1661                                                              , stackArgumentsSize
1662 #endif
1663                                                              );
1664
1665                             // Add the resolve entrypoint into the cache.
1666                             //@TODO: Can we store a pointer to the holder rather than the entrypoint?
1667                             resolvers->Add((size_t)(pResolveHolder->stub()->resolveEntryPoint()), &probeR);
1668                         }
1669                         else
1670                         {
1671                             pResolveHolder = ResolveHolder::FromResolveEntry(addrOfResolver);
1672                         }
1673                         CONSISTENCY_CHECK(CheckPointer(pResolveHolder));
1674                         stub = pResolveHolder->stub()->resolveEntryPoint();
1675                         CONSISTENCY_CHECK(stub != NULL);
1676                     }
1677
1678                     // Only create a dispatch stub if:
1679                     //  1. We successfully created or found a resolve stub.
1680                     //  2. We are not blocked from creating a dispatch stub.
1681                     //  3. The call site is currently wired to a lookup stub. If the call site is wired
1682                     //     to anything else, then we're never going to use the dispatch stub so there's
1683                     //     no use in creating it.
1684                     if (pResolveHolder != NULL && stubKind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB)
1685                     {
1686                         DispatchEntry entryD;
1687                         Prober probeD(&entryD);
1688                         if (bCreateDispatchStub &&
1689                             dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
1690                         {
1691                             // We are allowed to create a reusable dispatch stub for all assemblies
1692                             // this allows us to optimize the call interception case the same way
1693                             DispatchHolder *pDispatchHolder = NULL;
1694                             PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
1695                             if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
1696                             {
1697                                 PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
1698                                 bool reenteredCooperativeGCMode = false;
1699                                 pDispatchHolder = GenerateDispatchStub(
1700                                     target, addrOfFail, objectType, token.To_SIZE_T(), &reenteredCooperativeGCMode);
1701                                 if (reenteredCooperativeGCMode)
1702                                 {
1703                                     // The prober may have been invalidated by reentering cooperative GC mode, reset it
1704                                     BOOL success = dispatchers->SetUpProber(token.To_SIZE_T(), (size_t)objectType, &probeD);
1705                                     _ASSERTE(success);
1706                                 }
1707                                 dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
1708                             }
1709                             else
1710                             {
1711                                 pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
1712                             }
1713
1714                             // Now assign the entrypoint to stub
1715                             CONSISTENCY_CHECK(CheckPointer(pDispatchHolder));
1716                             stub = pDispatchHolder->stub()->entryPoint();
1717                             CONSISTENCY_CHECK(stub != NULL);
1718                         }
1719                         else
1720                         {
1721                             insertKind = DispatchCache::IK_SHARED;
1722                         }
1723                     }
1724                 }
1725             }
1726             else
1727             {
1728                 stats.worker_call_no_patch++;
1729             }
1730         }
1731
1732         // When we get here, target is where to go to
1733         // and patch is TRUE, telling us that we may have to back patch the call site with stub
1734         if (stub != CALL_STUB_EMPTY_ENTRY)
1735         {
1736             _ASSERTE(patch);
1737
1738             // If we get here and have a dispatching stub in hand, it probably means
1739             // that the cache used by the resolve stubs (g_resolveCache) does not have this stub,
1740             // so insert it.
1741             //
1742             // We only insert into the cache if we have a ResolveStub or we have a DispatchStub
1743             // that missed, since we want to keep the resolve cache empty of unused entries.
1744             // If later the dispatch stub fails (because of another type at the call site),
1745             // we'll insert the new value into the cache for the next time.
1746             // Note that if we decide to skip creating a DispatchStub because we are calling
1747             // from a shared to an unshared domain, then we will also insert into the cache.
1748
1749             if (insertKind == DispatchCache::IK_NONE)
1750             {
1751                 if (stubKind == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
1752                 {
1753                     insertKind = DispatchCache::IK_DISPATCH;
1754                 }
1755                 else if (stubKind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
1756                 {
1757                     insertKind = DispatchCache::IK_RESOLVE;
1758                 }
1759             }
1760
1761             if (insertKind != DispatchCache::IK_NONE)
1762             {
1763                 VirtualCallStubManager * pMgrForCacheElem = this;
1764
1765                 // If we're calling from shared to unshared, make sure the cache element is
1766                 // allocated in the unshared manager so that when the unshared code unloads
1767                 // the cache element is unloaded.
1768                 if (bCallToShorterLivedTarget)
1769                 {
1770                     _ASSERTE(pCalleeMgr != NULL);
1771                     pMgrForCacheElem = pCalleeMgr;
1772                 }
1773
1774                 // Find or create a new ResolveCacheElem
1775                 ResolveCacheElem *e = pMgrForCacheElem->GetResolveCacheElem(objectType, token.To_SIZE_T(), (void *)target);
1776
1777                 // Try to insert this entry into the resolver cache table
1778                 // When we get a collision we may decide not to insert this element
1779                 // and Insert will return FALSE if we decided not to add the entry
1780 #ifdef STUB_LOGGING
1781                 BOOL didInsert =
1782 #endif
1783                     g_resolveCache->Insert(e, insertKind);
1784
1785 #ifdef STUB_LOGGING
1786                 if ((STUB_COLLIDE_MONO_PCT > 0) && !didInsert && (stubKind == STUB_CODE_BLOCK_VSD_RESOLVE_STUB))
1787                 {
1788                     // If we decided not to perform the insert and we came in with a resolve stub
1789                     // then we currently have a polymorphic callsite, so we flip a coin to decide
1790                     // whether to convert this callsite back into a dispatch stub (monomorphic callsite)
1791
1792                     if (!bCallToShorterLivedTarget && bCreateDispatchStub)
1793                     {
1794                         // We are allowed to create a reusable dispatch stub for all assemblies
1795                         // this allows us to optimize the call interception case the same way
1796
1797                         UINT32 coin = UINT32(GetRandomInt(100));
1798
1799                         if (coin < STUB_COLLIDE_MONO_PCT)
1800                         {
1801                             DispatchEntry entryD;
1802                             Prober probeD(&entryD);
1803                             if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
1804                             {
1805                                 DispatchHolder *pDispatchHolder = NULL;
1806                                 PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
1807                                 if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
1808                                 {
1809                                     // It is possible that we never created this monomorphic dispatch stub
1810                                     // so we may have to create it now
1811                                     ResolveHolder* pResolveHolder = ResolveHolder::FromResolveEntry(pCallSite->GetSiteTarget());
1812                                     PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
1813                                     bool reenteredCooperativeGCMode = false;
1814                                     pDispatchHolder = GenerateDispatchStub(
1815                                         target, addrOfFail, objectType, token.To_SIZE_T(), &reenteredCooperativeGCMode);
1816                                     if (reenteredCooperativeGCMode)
1817                                     {
1818                                         // The prober may have been invalidated by reentering cooperative GC mode, reset it
1819                                         BOOL success = dispatchers->SetUpProber(token.To_SIZE_T(), (size_t)objectType, &probeD);
1820                                         _ASSERTE(success);
1821                                     }
1822                                     dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
1823                                 }
1824                                 else
1825                                 {
1826                                     pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
1827                                 }
1828
1829                                 // increment the # of times we changed a cache collision into a mono stub
1830                                 stats.worker_collide_to_mono++;
1831
1832                                 // Now assign the entrypoint to stub
1833                                 CONSISTENCY_CHECK(pDispatchHolder != NULL);
1834                                 stub = pDispatchHolder->stub()->entryPoint();
1835                                 CONSISTENCY_CHECK(stub != NULL);
1836                             }
1837                         }
1838                     }
1839                 }
1840 #endif // STUB_LOGGING
1841             }
1842
1843             if (stubKind == STUB_CODE_BLOCK_VSD_LOOKUP_STUB)
1844             {
1845                 BackPatchSite(pCallSite, (PCODE)stub);
1846             }
1847         }
1848     }
1849     EX_CATCH
1850     {
1851     }
1852     EX_END_CATCH (SwallowAllExceptions);
1853
1854     // Target can be NULL only if we can't resolve to an address
1855     _ASSERTE(target != NULL);
1856
1857     return target;
1858 }
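/* A condensed, illustrative outline of the policy implemented above; the helper
   names are hypothetical, and collectible-target, OOM and STUB_LOGGING handling
   are deliberately omitted:

     PCODE ResolveOutlineSketch(MethodTable * pMT, DispatchToken token)
     {
         if (PCODE t = FindExistingDispatchStubTarget(token, pMT))   // dispatchers hash
             return t;
         if (PCODE t = FindExistingResolveCacheTarget(token, pMT))   // cache_entries hash
             return t;

         PCODE target = NULL;
         BOOL  patch  = Resolver(pMT, token, NULL, &target, TRUE);   // full slot walk
         if (patch)
         {
             // build or reuse a ResolveStub; if the call site is still a lookup
             // stub, also build a DispatchStub; add a ResolveCacheElem to
             // g_resolveCache; finally BackPatchSite() the indirection cell.
         }
         return target;
     }
*/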
1859
1860 /*
1861 Resolve the token in the context of the method table, and set the target to point to
1862 the address that we should go to get to the implementation.  Return a boolean indicating
1863 whether or not this is a permanent choice or a temporary choice.  For example, if the code has
1864 not been jitted yet, return FALSE and set the target to the prestub.  If the target is set to NULL,
1865 it means that the token is not resolvable.
1866 */
1867 BOOL
1868 VirtualCallStubManager::Resolver(
1869     MethodTable * pMT,
1870     DispatchToken token,
1871     OBJECTREF   * protectedObj, // this one can actually be NULL, consider using pMT if you don't need the object itself
1872     PCODE *       ppTarget,
1873     BOOL          throwOnConflict)
1874 {
1875     CONTRACTL {
1876         THROWS;
1877         GC_TRIGGERS;
1878         PRECONDITION(CheckPointer(pMT));
1879         PRECONDITION(TypeHandle(pMT).CheckFullyLoaded());
1880     } CONTRACTL_END;
1881
1882 #ifdef _DEBUG
1883     MethodTable * dbg_pTokenMT = pMT;
1884     MethodDesc *  dbg_pTokenMD = NULL;
1885     if (token.IsTypedToken())
1886     {
1887         dbg_pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
1888         dbg_pTokenMD = dbg_pTokenMT->FindDispatchSlot(TYPE_ID_THIS_CLASS, token.GetSlotNumber(), throwOnConflict).GetMethodDesc();
1889     }
1890 #endif // _DEBUG
1891
1892     // NOTE: CERs are not hardened against transparent proxy types,
1893     // so no need to worry about throwing an exception from here.
1894
1895     LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (start) looking up %s method in %s\n",
1896          token.IsThisToken() ? "this" : "interface",
1897          pMT->GetClass()->GetDebugClassName()));
1898
1899     MethodDesc * pMD = NULL;
1900     BOOL fShouldPatch = FALSE;
1901     DispatchSlot implSlot(pMT->FindDispatchSlot(token.GetTypeID(), token.GetSlotNumber(), throwOnConflict));
1902
1903     // If we found a target, then just figure out if we're allowed to create a stub around
1904     // this target and backpatch the callsite.
1905     if (!implSlot.IsNull())
1906     {
1907 #if defined(LOGGING) || defined(_DEBUG)
1908         {
1909             pMD = implSlot.GetMethodDesc();
1910             if (pMD != NULL)
1911             {
1912                 // Make sure we aren't crossing app domain boundaries
1913                 CONSISTENCY_CHECK(GetAppDomain()->CheckValidModule(pMD->GetModule()));
1914 #ifdef LOGGING
1915                 WORD slot = pMD->GetSlot();
1916                 BOOL fIsOverriddenMethod =
1917                     (pMT->GetNumParentVirtuals() <= slot && slot < pMT->GetNumVirtuals());
1918                 LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (end) looked up %s %s method %s::%s\n",
1919                      fIsOverriddenMethod ? "overridden" : "newslot",
1920                      token.IsThisToken() ? "this" : "interface",
1921                      pMT->GetClass()->GetDebugClassName(),
1922                      pMD->GetName()));
1923 #endif // LOGGING
1924             }
1925         }
1926 #endif // defined(LOGGING) || defined(_DEBUG)
1927
1928         BOOL fSlotCallsPrestub = DoesSlotCallPrestub(implSlot.GetTarget());
1929         if (!fSlotCallsPrestub)
1930         {
1931             // Skip fixup precode jump for better perf
1932             PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(implSlot.GetTarget());
1933             if (pDirectTarget != NULL)
1934                 implSlot = DispatchSlot(pDirectTarget);
1935
1936             // Only patch to a target if it's not going to call the prestub.
1937             fShouldPatch = TRUE;
1938         }
1939         else
1940         {
1941             // Getting the MethodDesc is very expensive,
1942             // so only call this when we are calling the prestub
1943             pMD = implSlot.GetMethodDesc();
1944
1945             if (pMD == NULL)
1946             {
1947                 // pMD can be NULL when another thread raced in and patched the Method Entry Point
1948                 // so that it no longer points at the prestub
1949                 // In such a case DoesSlotCallPrestub will now return FALSE
1950                 CONSISTENCY_CHECK(!DoesSlotCallPrestub(implSlot.GetTarget()));
1951                 fSlotCallsPrestub = FALSE;
1952             }
1953
1954             if (!fSlotCallsPrestub)
1955             {
1956                 // Only patch to a target if it's not going to call the prestub.
1957                 fShouldPatch = TRUE;
1958             }
1959             else
1960             {
1961                 CONSISTENCY_CHECK(CheckPointer(pMD));
1962                 if (pMD->IsGenericMethodDefinition())
1963                 {
1964                     //@GENERICS: Currently, generic virtual methods are called only through JIT_VirtualFunctionPointer
1965                     //           and so we could never have a virtual call stub at a call site for a generic virtual.
1966                     //           As such, we're assuming the only callers to Resolver are calls to GetTarget caused
1967                     //           indirectly by JIT_VirtualFunctionPointer. So, we return TRUE for patching so that
1968                     //           we can cache the result in GetTarget and we don't have to perform the full resolve
1969                     //           every time. If the way we call generic virtual methods changes, this will also need
1970                     //           to change.
1971                     fShouldPatch = TRUE;
1972                 }
1973             }
1974         }
1975     }
1976 #ifdef FEATURE_COMINTEROP
1977     else if (pMT->IsComObjectType() && IsInterfaceToken(token))
1978     {
1979         MethodTable * pItfMT = GetTypeFromToken(token);
1980         implSlot = pItfMT->FindDispatchSlot(TYPE_ID_THIS_CLASS, token.GetSlotNumber(), throwOnConflict);
1981
1982         _ASSERTE(!pItfMT->HasInstantiation());
1983
1984         fShouldPatch = TRUE;
1985     }
1986 #endif // FEATURE_COMINTEROP
1987 #ifdef FEATURE_ICASTABLE
1988     else if (pMT->IsICastable() && protectedObj != NULL && *protectedObj != NULL)
1989     {
1990         GCStress<cfg_any>::MaybeTrigger();
1991
1992         // In case of ICastable, instead of trying to find method implementation in the real object type
1993         // we call Resolver() again with whatever type it returns.
1994         // It allows objects that implement ICastable to mimic behavior of other types.
1995         MethodTable * pTokenMT = GetTypeFromToken(token);
1996
1997         // Make call to ICastableHelpers.GetImplType(this, interfaceTypeObj)
1998         PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICASTABLEHELPERS__GETIMPLTYPE);
1999
2000         OBJECTREF tokenManagedType = pTokenMT->GetManagedClassObject(); //GC triggers
2001
2002         DECLARE_ARGHOLDER_ARRAY(args, 2);
2003         args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*protectedObj);
2004         args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(tokenManagedType);
2005
2006         OBJECTREF impTypeObj = NULL;
2007         CALL_MANAGED_METHOD_RETREF(impTypeObj, OBJECTREF, args);
2008
2009         INDEBUG(tokenManagedType = NULL); //tokenManagedType wasn't protected during the call
2010         if (impTypeObj == NULL) // GetImplType returns default(RuntimeTypeHandle)
2011         {
2012             COMPlusThrow(kEntryPointNotFoundException);
2013         }
2014
2015         ReflectClassBaseObject* resultTypeObj = ((ReflectClassBaseObject*)OBJECTREFToObject(impTypeObj));
2016         TypeHandle resultTypeHnd = resultTypeObj->GetType();
2017         MethodTable *pResultMT = resultTypeHnd.GetMethodTable();
2018
2019         return Resolver(pResultMT, token, protectedObj, ppTarget, throwOnConflict);
2020     }
2021 #endif // FEATURE_ICASTABLE
2022     else if (pMT->IsIDynamicInterfaceCastable()
2023         && protectedObj != NULL
2024         && *protectedObj != NULL
2025         && IsInterfaceToken(token))
2026     {
2027         MethodTable *pTokenMT = GetTypeFromToken(token);
2028
2029         OBJECTREF implTypeRef = DynamicInterfaceCastable::GetInterfaceImplementation(protectedObj, TypeHandle(pTokenMT));
2030         _ASSERTE(implTypeRef != NULL);
2031
2032         ReflectClassBaseObject *implTypeObj = ((ReflectClassBaseObject *)OBJECTREFToObject(implTypeRef));
2033         TypeHandle implTypeHandle = implTypeObj->GetType();
2034         return Resolver(implTypeHandle.GetMethodTable(), token, protectedObj, ppTarget, throwOnConflict);
2035     }
2036
2037     if (implSlot.IsNull())
2038     {
2039         MethodTable * pTokenMT = NULL;
2040         MethodDesc *  pTokenMD = NULL;
2041         if (token.IsTypedToken())
2042         {
2043             pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2044             pTokenMD = pTokenMT->FindDispatchSlot(TYPE_ID_THIS_CLASS, token.GetSlotNumber(), throwOnConflict).GetMethodDesc();
2045         }
2046
2047 #ifdef FEATURE_COMINTEROP
2048         if ((pTokenMT != NULL) && (pTokenMT->GetClass()->IsEquivalentType()))
2049         {
2050             SString methodName;
2051             DefineFullyQualifiedNameForClassW();
2052             pTokenMD->GetFullMethodInfo(methodName);
2053
2054             COMPlusThrowHR(COR_E_MISSINGMETHOD, COR_E_MISSINGMETHOD, GetFullyQualifiedNameForClassNestedAwareW(pMT), methodName.GetUnicode());
2055         }
2056         else
2057 #endif // FEATURE_COMINTEROP
2058         if (!throwOnConflict)
2059         {
2060             // Assume we got null because there was a default interface method conflict
2061             *ppTarget = NULL;
2062             return FALSE;
2063         }
2064         else
2065         {
2066             // Method not found. In the castable object scenario where the method is being resolved on an interface itself,
2067             // this can happen if the user tried to call a method without a default implementation. Outside of that case,
2068             // this should never happen for anything but equivalent types
2069             CONSISTENCY_CHECK((!implSlot.IsNull() || pMT->IsInterface()) && "Valid method implementation was not found.");
2070             COMPlusThrow(kEntryPointNotFoundException);
2071         }
2072     }
2073
2074     *ppTarget = implSlot.GetTarget();
2075
2076     return fShouldPatch;
2077 } // VirtualCallStubManager::Resolver
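/* Bare-bones sketch of the contract this function implements, ignoring the
   COM-interop, ICastable and IDynamicInterfaceCastable side paths; the helper
   name is hypothetical:

     BOOL ResolveSlotSketch(MethodTable * pMT, DispatchToken token, PCODE * ppTarget)
     {
         DispatchSlot slot = pMT->FindDispatchSlot(token.GetTypeID(),
                                                   token.GetSlotNumber(),
                                                   TRUE);            // throwOnConflict
         if (slot.IsNull())
         {
             *ppTarget = NULL;            // token is not resolvable on this type
             return FALSE;
         }
         *ppTarget = slot.GetTarget();
         // Only a permanent (patchable) answer when the slot no longer routes
         // through the prestub, i.e. real code is already behind it.
         return !DoesSlotCallPrestub(slot.GetTarget());
     }
*/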
2078
2079 #endif // !DACCESS_COMPILE
2080
2081 //----------------------------------------------------------------------------
2082 // Given a contract, return true if the contract represents a slot on the target.
2083 BOOL VirtualCallStubManager::IsClassToken(DispatchToken token)
2084 {
2085     CONTRACT (BOOL) {
2086         NOTHROW;
2087         GC_NOTRIGGER;
2088     } CONTRACT_END;
2089     RETURN (token.IsThisToken());
2090 }
2091
2092 //----------------------------------------------------------------------------
2093 // Given a contract, return true if the contract represents an interface, false if just a slot.
2094 BOOL VirtualCallStubManager::IsInterfaceToken(DispatchToken token)
2095 {
2096     CONTRACT (BOOL) {
2097         NOTHROW;
2098         GC_NOTRIGGER;
2099     } CONTRACT_END;
2100     BOOL ret = token.IsTypedToken();
2101     // For now, only interfaces have typed dispatch tokens.
2102     CONSISTENCY_CHECK(!ret || CheckPointer(GetThread()->GetDomain()->LookupType(token.GetTypeID())));
2103     CONSISTENCY_CHECK(!ret || GetThread()->GetDomain()->LookupType(token.GetTypeID())->IsInterface());
2104     RETURN (ret);
2105 }
2106
2107 #ifndef DACCESS_COMPILE
2108
2109 //----------------------------------------------------------------------------
2110 MethodDesc *
2111 VirtualCallStubManager::GetRepresentativeMethodDescFromToken(
2112     DispatchToken token,
2113     MethodTable * pMT)
2114 {
2115     CONTRACT (MethodDesc *) {
2116         NOTHROW;
2117         GC_NOTRIGGER;
2118         MODE_COOPERATIVE;
2119         PRECONDITION(CheckPointer(pMT));
2120         POSTCONDITION(CheckPointer(RETVAL));
2121     } CONTRACT_END;
2122
2123     // This is called when trying to create a HelperMethodFrame, which means there are
2124     // potentially managed references on the stack that are not yet protected.
2125     GCX_FORBID();
2126
2127     if (token.IsTypedToken())
2128     {
2129         pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2130         CONSISTENCY_CHECK(CheckPointer(pMT));
2131         token = DispatchToken::CreateDispatchToken(token.GetSlotNumber());
2132     }
2133     CONSISTENCY_CHECK(token.IsThisToken());
2134     RETURN (pMT->GetMethodDescForSlot(token.GetSlotNumber()));
2135 }
2136
2137 //----------------------------------------------------------------------------
2138 MethodTable *VirtualCallStubManager::GetTypeFromToken(DispatchToken token)
2139 {
2140     CONTRACTL {
2141         NOTHROW;
2142         WRAPPER(GC_TRIGGERS);
2143     } CONTRACTL_END;
2144     MethodTable *pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2145     _ASSERTE(pMT != NULL);
2146     _ASSERTE(pMT->LookupTypeID() == token.GetTypeID());
2147     return pMT;
2148 }
2149
2150 #endif // !DACCESS_COMPILE
2151
2152 //----------------------------------------------------------------------------
2153 MethodDesc *VirtualCallStubManager::GetInterfaceMethodDescFromToken(DispatchToken token)
2154 {
2155     CONTRACTL {
2156         NOTHROW;
2157         WRAPPER(GC_TRIGGERS);
2158         PRECONDITION(IsInterfaceToken(token));
2159     } CONTRACTL_END;
2160
2161 #ifndef DACCESS_COMPILE
2162
2163     MethodTable * pMT = GetTypeFromToken(token);
2164     PREFIX_ASSUME(pMT != NULL);
2165     CONSISTENCY_CHECK(CheckPointer(pMT));
2166     return pMT->GetMethodDescForSlot(token.GetSlotNumber());
2167
2168 #else // DACCESS_COMPILE
2169
2170     DacNotImpl();
2171     return NULL;
2172
2173 #endif // DACCESS_COMPILE
2174 }
2175
2176 #ifndef DACCESS_COMPILE
2177
2178 //----------------------------------------------------------------------------
2179 // This will check to see if a match is in the cache.
2180 // Returns the target on success, otherwise NULL.
2181 PCODE VirtualCallStubManager::CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT)
2182 {
2183     CONTRACTL {
2184         NOTHROW;
2185         GC_NOTRIGGER;
2186         PRECONDITION(CheckPointer(pMT));
2187     } CONTRACTL_END
2188
2189     // Now look in the cache for a match
2190     ResolveCacheElem *pElem = g_resolveCache->Lookup(token, tokenHash, pMT);
2191
2192     // If the element matches, return the target - we're done!
2193     return (PCODE)(pElem != NULL ? pElem->target : NULL);
2194 }
2195
2196
2197 //----------------------------------------------------------------------------
2198 /* static */
2199 PCODE
2200 VirtualCallStubManager::GetTarget(
2201     DispatchToken token,
2202     MethodTable * pMT,
2203     BOOL throwOnConflict)
2204 {
2205     CONTRACTL {
2206         THROWS;
2207         GC_TRIGGERS;
2208         INJECT_FAULT(COMPlusThrowOM(););
2209         PRECONDITION(CheckPointer(pMT));
2210     } CONTRACTL_END
2211
2212     g_external_call++;
2213
2214     if (token.IsThisToken())
2215     {
2216         return pMT->GetRestoredSlot(token.GetSlotNumber());
2217     }
2218
2219     GCX_COOP(); // This is necessary for BucketTable synchronization
2220
2221     PCODE target = NULL;
2222
2223 #ifndef STUB_DISPATCH_PORTABLE
2224     target = CacheLookup(token.To_SIZE_T(), DispatchCache::INVALID_HASH, pMT);
2225     if (target != NULL)
2226         return target;
2227 #endif // !STUB_DISPATCH_PORTABLE
2228
2229     // No match, now do full resolve
2230     BOOL fPatch;
2231
2232     // TODO: passing NULL as protectedObj here can lead to incorrect behavior for ICastable objects
2233     // We need to review if this is the case and refactor this code if we want ICastable to become officially supported
2234     fPatch = Resolver(pMT, token, NULL, &target, throwOnConflict);
2235     _ASSERTE(!throwOnConflict || target != NULL);
2236
2237 #ifndef STUB_DISPATCH_PORTABLE
2238     if (fPatch)
2239     {
2240         ResolveCacheElem *pCacheElem = pMT->GetLoaderAllocator()->GetVirtualCallStubManager()->
2241             GetResolveCacheElem(pMT, token.To_SIZE_T(), (BYTE *)target);
2242
2243         if (pCacheElem)
2244         {
2245             if (!g_resolveCache->Insert(pCacheElem, DispatchCache::IK_EXTERNAL))
2246             {
2247                 // We decided not to perform the insert
2248             }
2249         }
2250     }
2251     else
2252     {
2253         g_external_call_no_patch++;
2254     }
2255 #endif // !STUB_DISPATCH_PORTABLE
2256
2257     return target;
2258 }
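/* Hedged usage sketch: callers that are not going through a patched call site
   (for example the generic-virtual-method path mentioned in Resolver) can ask
   for a target directly.  someObject and existingToken are placeholders:

     // existingToken: an interface or this-class DispatchToken obtained elsewhere
     DispatchToken token = existingToken;
     MethodTable * pMT   = someObject->GetMethodTable();
     PCODE target = VirtualCallStubManager::GetTarget(token, pMT, TRUE);   // throwOnConflict
*/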
2259
2260 #endif // !DACCESS_COMPILE
2261
2262 //----------------------------------------------------------------------------
2263 /*
2264 Resolve the token in the context of the method table, and set the target to point to
2265 the address that we should go to get to the implementation.  Return a boolean indicating
2266 whether or not this is a permanent choice or a temporary choice.  For example, if the code has
2267 not been jitted yet, return FALSE and set the target to the prestub.  If the target is set to NULL,
2268 it means that the token is not resolvable.
2269 */
2270 BOOL
2271 VirtualCallStubManager::TraceResolver(
2272     Object *           pObj,
2273     DispatchToken      token,
2274     TraceDestination * trace)
2275 {
2276     CONTRACTL {
2277         THROWS;
2278         GC_TRIGGERS;
2279         PRECONDITION(CheckPointer(pObj, NULL_OK));
2280         INJECT_FAULT(COMPlusThrowOM(););
2281     } CONTRACTL_END
2282
2283     // If someone is trying to step into a stub dispatch call on a null object,
2284     // just say that we can't trace this call and we'll just end up throwing
2285     // a null ref exception.
2286     if (pObj == NULL)
2287     {
2288         return FALSE;
2289     }
2290
2291     MethodTable *pMT = pObj->GetMethodTable();
2292     CONSISTENCY_CHECK(CheckPointer(pMT));
2293
2294     DispatchSlot slot(pMT->FindDispatchSlot(token.GetTypeID(), token.GetSlotNumber(), FALSE /* throwOnConflict */));
2295     if (slot.IsNull() && IsInterfaceToken(token) && pMT->IsComObjectType())
2296     {
2297         MethodDesc * pItfMD = GetInterfaceMethodDescFromToken(token);
2298         CONSISTENCY_CHECK(pItfMD->GetMethodTable()->GetSlot(pItfMD->GetSlot()) == pItfMD->GetMethodEntryPoint());
2299
2300         // Look up the slot on the interface itself.
2301         slot = pItfMD->GetMethodTable()->FindDispatchSlot(TYPE_ID_THIS_CLASS, pItfMD->GetSlot(), FALSE /* throwOnConflict */);
2302     }
2303
2304     // The dispatch slot's target may change due to code versioning shortly after it was retrieved above for the trace. This
2305     // will result in the debugger getting some version of the code or the prestub, but not necessarily the exact code pointer
2306     // that winds up getting executed. The debugger has code that handles this ambiguity by placing a breakpoint at the start of
2307     // all native code versions, even if they aren't the one that was reported by this trace, see
2308     // DebuggerController::PatchTrace() under case TRACE_MANAGED. This alleviates the StubManager from having to prevent the
2309     // race that occurs here.
2310     //
2311     // If the dispatch slot is null, we assume it's because of a diamond case in default interface method dispatch.
2312     return slot.IsNull() ? FALSE : (StubManager::TraceStub(slot.GetTarget(), trace));
2313 }
2314
2315 #ifndef DACCESS_COMPILE
2316
2317 //----------------------------------------------------------------------------
2318 /* Change the call site.  It is failing the expected MT test in the dispatcher stub
2319 too often.
2320 */
2321 void VirtualCallStubManager::BackPatchWorker(StubCallSite* pCallSite)
2322 {
2323     CONTRACTL {
2324         NOTHROW;
2325         GC_NOTRIGGER;
2326         FORBID_FAULT;
2327     } CONTRACTL_END
2328
2329     PCODE callSiteTarget = pCallSite->GetSiteTarget();
2330
2331     if (isDispatchingStubStatic(callSiteTarget))
2332     {
2333         DispatchHolder * dispatchHolder = DispatchHolder::FromDispatchEntry(callSiteTarget);
2334         DispatchStub *   dispatchStub   = dispatchHolder->stub();
2335
2336         //yes, patch it to point to the resolve stub
2337         //We can ignore the races now since we now know that the call site does go thru our
2338         //stub mechanisms, hence no matter who wins the race, we are correct.
2339         //We find the correct resolve stub by following the failure path in the dispatcher stub itself
2340         PCODE failEntry    = dispatchStub->failTarget();
2341         ResolveStub* resolveStub  = ResolveHolder::FromFailEntry(failEntry)->stub();
2342         PCODE resolveEntry = resolveStub->resolveEntryPoint();
2343         BackPatchSite(pCallSite, resolveEntry);
2344
2345         LOG((LF_STUBS, LL_INFO10000, "BackPatchWorker call-site" FMT_ADDR "dispatchStub" FMT_ADDR "\n",
2346              DBG_ADDR(pCallSite->GetReturnAddress()), DBG_ADDR(dispatchHolder->stub())));
2347
2348         //Add back the default miss count to the counter being used by this resolve stub
2349         //Since resolve stubs are shared among many dispatch stubs, each dispatch stub
2350         //that fails decrements the shared counter and the dispatch stub that trips the
2351         //counter gets converted into a polymorphic site
2352         INT32* counter = resolveStub->pCounter();
2353         *counter += STUB_MISS_COUNT_VALUE;
2354     }
2355 }
2356
2357 //----------------------------------------------------------------------------
2358 /* consider changing the call site to point to stub, if appropriate do it
2359 */
2360 void VirtualCallStubManager::BackPatchSite(StubCallSite* pCallSite, PCODE stub)
2361 {
2362     CONTRACTL {
2363         NOTHROW;
2364         GC_NOTRIGGER;
2365         FORBID_FAULT;
2366         PRECONDITION(stub != NULL);
2367         PRECONDITION(CheckPointer(pCallSite));
2368         PRECONDITION(pCallSite->GetSiteTarget() != NULL);
2369     } CONTRACTL_END
2370
2371     PCODE patch = stub;
2372
2373     // This will take care of the prejit case and find the actual patch site
2374     PCODE prior = pCallSite->GetSiteTarget();
2375
2376     //is this really going to change anything, if not don't do it.
2377     if (prior == patch)
2378         return;
2379
2380     //we only want to do the following transitions for right now:
2381     //  prior           new
2382     //  lookup          dispatching or resolving
2383     //  dispatching     resolving
2384     if (isResolvingStubStatic(prior))
2385         return;
2386
2387     if(isDispatchingStubStatic(stub))
2388     {
2389         if(isDispatchingStubStatic(prior))
2390         {
2391             return;
2392         }
2393         else
2394         {
2395             stats.site_write_mono++;
2396         }
2397     }
2398     else
2399     {
2400         stats.site_write_poly++;
2401     }
2402
2403     //patch the call site
2404     pCallSite->SetSiteTarget(patch);
2405
2406     stats.site_write++;
2407 }
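/* The patching policy above only ever moves a call site "forward".  A compact,
   hypothetical restatement of the allowed transitions (SiteKind is not a real
   runtime type):

     enum class SiteKind { Lookup, Dispatch, Resolve };

     bool IsAllowedTransitionSketch(SiteKind prior, SiteKind next)
     {
         if (prior == SiteKind::Resolve)  return false;                   // terminal state
         if (prior == SiteKind::Dispatch) return next == SiteKind::Resolve;
         return next == SiteKind::Dispatch || next == SiteKind::Resolve;  // from Lookup
     }
*/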
2408
2409 //----------------------------------------------------------------------------
2410 void StubCallSite::SetSiteTarget(PCODE newTarget)
2411 {
2412     WRAPPER_NO_CONTRACT;
2413     PTR_PCODE pCell = GetIndirectCell();
2414     *pCell = newTarget;
2415 }
2416
2417 //----------------------------------------------------------------------------
2418 /* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
2419 are the addresses the stub is to transfer to depending on the test with pMTExpected
2420 */
2421 DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE            addrOfCode,
2422                                                              PCODE            addrOfFail,
2423                                                              void *           pMTExpected,
2424                                                              size_t           dispatchToken,
2425                                                              bool *           pMayHaveReenteredCooperativeGCMode)
2426 {
2427     CONTRACT (DispatchHolder*) {
2428         THROWS;
2429         GC_TRIGGERS;
2430         INJECT_FAULT(COMPlusThrowOM(););
2431         PRECONDITION(addrOfCode != NULL);
2432         PRECONDITION(addrOfFail != NULL);
2433         PRECONDITION(CheckPointer(pMTExpected));
2434         PRECONDITION(pMayHaveReenteredCooperativeGCMode != nullptr);
2435         PRECONDITION(!*pMayHaveReenteredCooperativeGCMode);
2436         POSTCONDITION(CheckPointer(RETVAL));
2437     } CONTRACT_END;
2438
2439     size_t dispatchHolderSize = sizeof(DispatchHolder);
2440
2441 #ifdef TARGET_AMD64
2442     // See comment around m_fShouldAllocateLongJumpDispatchStubs for explanation.
2443     if (m_fShouldAllocateLongJumpDispatchStubs
2444         INDEBUG(|| g_pConfig->ShouldGenerateLongJumpDispatchStub()))
2445     {
2446         RETURN GenerateDispatchStubLong(addrOfCode,
2447                                         addrOfFail,
2448                                         pMTExpected,
2449                                         dispatchToken,
2450                                         pMayHaveReenteredCooperativeGCMode);
2451     }
2452
2453     dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT);
2454 #endif
2455
2456     //allocate from the requisite heap and copy the template over it.
2457     DispatchHolder * holder = (DispatchHolder*) (void*)
2458         dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
2459
2460 #ifdef TARGET_AMD64
2461     if (!DispatchHolder::CanShortJumpDispatchStubReachFailTarget(addrOfFail, (LPCBYTE)holder))
2462     {
2463         m_fShouldAllocateLongJumpDispatchStubs = TRUE;
2464         RETURN GenerateDispatchStub(addrOfCode, addrOfFail, pMTExpected, dispatchToken, pMayHaveReenteredCooperativeGCMode);
2465     }
2466 #endif
2467
2468     ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, dispatchHolderSize);
2469     dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
2470                        addrOfFail,
2471                        (size_t)pMTExpected
2472 #ifdef TARGET_AMD64
2473                        , DispatchStub::e_TYPE_SHORT
2474 #endif
2475                        );
2476
2477 #ifdef FEATURE_CODE_VERSIONING
2478     MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(addrOfCode);
2479     if (pMD->IsVersionableWithVtableSlotBackpatch())
2480     {
2481         EntryPointSlots::SlotType slotType;
2482         TADDR slot = holder->stub()->implTargetSlot(&slotType);
2483         pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
2484
2485         // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
2486         *pMayHaveReenteredCooperativeGCMode = true;
2487     }
2488 #endif
2489
2490     ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2491
2492     //incr our counters
2493     stats.stub_mono_counter++;
2494     stats.stub_space += (UINT32)dispatchHolderSize;
2495     LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
2496                                  DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
2497
2498 #ifdef FEATURE_PERFMAP
2499     PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
2500 #endif
2501
2502     RETURN (holder);
2503 }
2504
2505 #ifdef TARGET_AMD64
2506 //----------------------------------------------------------------------------
2507 /* Generate a dispatch stub. pMTExpected is the method table to burn into the stub, and the two addrOf's
2508 are the addresses the stub transfers to, depending on the result of the test against pMTExpected
2509 */
2510 DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE            addrOfCode,
2511                                                                  PCODE            addrOfFail,
2512                                                                  void *           pMTExpected,
2513                                                                  size_t           dispatchToken,
2514                                                                  bool *           pMayHaveReenteredCooperativeGCMode)
2515 {
2516     CONTRACT (DispatchHolder*) {
2517         THROWS;
2518         GC_TRIGGERS;
2519         INJECT_FAULT(COMPlusThrowOM(););
2520         PRECONDITION(addrOfCode != NULL);
2521         PRECONDITION(addrOfFail != NULL);
2522         PRECONDITION(CheckPointer(pMTExpected));
2523         PRECONDITION(pMayHaveReenteredCooperativeGCMode != nullptr);
2524         PRECONDITION(!*pMayHaveReenteredCooperativeGCMode);
2525         POSTCONDITION(CheckPointer(RETVAL));
2526     } CONTRACT_END;
2527
2528     //allocate from the requisite heap and copy the template over it.
2529     size_t dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG);
2530     DispatchHolder * holder = (DispatchHolder*) (void*)dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
2531     ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, dispatchHolderSize);
2532
2533     dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
2534                        addrOfFail,
2535                        (size_t)pMTExpected,
2536                        DispatchStub::e_TYPE_LONG);
2537
2538 #ifdef FEATURE_CODE_VERSIONING
2539     MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(addrOfCode);
2540     if (pMD->IsVersionableWithVtableSlotBackpatch())
2541     {
2542         EntryPointSlots::SlotType slotType;
2543         TADDR slot = holder->stub()->implTargetSlot(&slotType);
2544         pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
2545
2546         // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
2547         *pMayHaveReenteredCooperativeGCMode = true;
2548     }
2549 #endif
2550
2551     ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2552
2553     //incr our counters
2554     stats.stub_mono_counter++;
2555     stats.stub_space += static_cast<UINT32>(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG));
2556     LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
2557                                  DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
2558
2559 #ifdef FEATURE_PERFMAP
2560     PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
2561 #endif
2562
2563     RETURN (holder);
2564 }
2565 #endif
2566
2567 //----------------------------------------------------------------------------
2568 /* Generate a resolve stub for the given dispatchToken.
2569 addrOfResolver is where to go if the inline cache check misses.
2570 addrOfPatcher is what to call if the fail piece is being called too often by dispatcher stubs.
2571 */
2572 ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE            addrOfResolver,
2573                                                            PCODE            addrOfPatcher,
2574                                                            size_t           dispatchToken
2575 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
2576                                                            , size_t         stackArgumentsSize
2577 #endif
2578                                                            )
2579 {
2580     CONTRACT (ResolveHolder*) {
2581         THROWS;
2582         GC_TRIGGERS;
2583         INJECT_FAULT(COMPlusThrowOM(););
2584         PRECONDITION(addrOfResolver != NULL);
2585 #if defined(TARGET_X86)
2586         PRECONDITION(addrOfPatcher != NULL);
2587 #endif
2588         POSTCONDITION(CheckPointer(RETVAL));
2589     } CONTRACT_END;
2590
2591     _ASSERTE(addrOfResolver);
2592
2593     //get a counter for the fail piece
2594
2595     UINT32         counter_index = counter_block::MAX_COUNTER_ENTRIES;
2596     counter_block *cur_block     = NULL;
2597
2598     while (true)
2599     {
2600         cur_block = VolatileLoad(&m_cur_counter_block);
2601
2602         if ((cur_block != NULL) && (cur_block->used < counter_block::MAX_COUNTER_ENTRIES))
2603         {
2604             counter_index = InterlockedIncrement((LONG*)&cur_block->used) - 1;
2605             if (counter_index < counter_block::MAX_COUNTER_ENTRIES)
2606             {
2607                 // Typical case: we allocate the next free counter in the block
2608                 break;
2609             }
2610         }
2611
2612     // Otherwise we have to create a new counter_block to serve as the head of the m_cur_counter_block list
2613
2614         // Create the new block in the main heap
2615         counter_block *pNew = new counter_block;
2616
2617         // Initialize the new block
2618         pNew->next = cur_block;
2619         pNew->used = 0;
2620
2621         // Try to link in the new block
2622         if (InterlockedCompareExchangeT(&m_cur_counter_block, pNew, cur_block) != cur_block)
2623         {
2624             // Lost a race to add pNew as new head
2625             delete pNew;
2626         }
2627     }
2628
2629     CONSISTENCY_CHECK(counter_index < counter_block::MAX_COUNTER_ENTRIES);
2630     CONSISTENCY_CHECK(CheckPointer(cur_block));
2631
2632     // Initialize the default miss counter for this resolve stub
2633     INT32* counterAddr = &(cur_block->block[counter_index]);
2634     *counterAddr = STUB_MISS_COUNT_VALUE;
2635
2636     //allocate from the requisite heap and copy the templates for each piece over it.
2637     ResolveHolder * holder = (ResolveHolder*) (void*)
2638         resolve_heap->AllocAlignedMem(sizeof(ResolveHolder), CODE_SIZE_ALIGN);
2639     ExecutableWriterHolder<ResolveHolder> resolveWriterHolder(holder, sizeof(ResolveHolder));
2640
2641     resolveWriterHolder.GetRW()->Initialize(holder,
2642                        addrOfResolver, addrOfPatcher,
2643                        dispatchToken, DispatchCache::HashToken(dispatchToken),
2644                        g_resolveCache->GetCacheBaseAddr(), counterAddr
2645 #if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
2646                        , stackArgumentsSize
2647 #endif
2648                        );
2649     ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2650
2651     //incr our counters
2652     stats.stub_poly_counter++;
2653     stats.stub_space += sizeof(ResolveHolder)+sizeof(size_t);
2654     LOG((LF_STUBS, LL_INFO10000, "GenerateResolveStub  for token" FMT_ADDR "at" FMT_ADDR "\n",
2655                                  DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
2656
2657 #ifdef FEATURE_PERFMAP
2658     PerfMap::LogStubs(__FUNCTION__, "GenerateResolveStub", (PCODE)holder->stub(), holder->stub()->size());
2659 #endif
2660
2661     RETURN (holder);
2662 }
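
//----------------------------------------------------------------------------
// The counter allocation above follows a common lock-free pattern: bump an
// index inside the current block, and when the block is full, CAS a freshly
// allocated block onto the head of the list, discarding it on a lost race.
// Below is a minimal, self-contained sketch of that pattern; the names are
// hypothetical and std::atomic stands in for the CLR interlocked helpers.
// Illustrative only and excluded from the build.
#if 0
#include <atomic>
#include <cstdint>

struct ToyCounterBlock
{
    static const uint32_t MaxEntries = 32;
    ToyCounterBlock*       next;
    std::atomic<uint32_t>  used;
    int32_t                block[MaxEntries];
};

// Reserve one counter slot, growing the block list when the current head fills up.
static int32_t* ToyAllocCounter(std::atomic<ToyCounterBlock*>& head)
{
    for (;;)
    {
        ToyCounterBlock* cur = head.load();
        if (cur != nullptr)
        {
            uint32_t index = cur->used.fetch_add(1);
            if (index < ToyCounterBlock::MaxEntries)
                return &cur->block[index];          // typical case: slot reserved
        }

        // Current block is full (or absent); try to publish a new head.
        ToyCounterBlock* fresh = new ToyCounterBlock{ cur, {0}, {} };
        if (!head.compare_exchange_strong(cur, fresh))
            delete fresh;                           // lost the race; retry with the winner's block
    }
}
#endif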
2663
2664 //----------------------------------------------------------------------------
2665 /* Generate a lookup stub for the given dispatchToken.  addrOfResolver is where the stub always transfers control
2666 */
2667 LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken)
2668 {
2669     CONTRACT (LookupHolder*) {
2670         THROWS;
2671         GC_TRIGGERS;
2672         INJECT_FAULT(COMPlusThrowOM(););
2673         PRECONDITION(addrOfResolver != NULL);
2674         POSTCONDITION(CheckPointer(RETVAL));
2675     } CONTRACT_END;
2676
2677     //allocate from the requisite heap and copy the template over it.
2678     LookupHolder * holder     = (LookupHolder*) (void*) lookup_heap->AllocAlignedMem(sizeof(LookupHolder), CODE_SIZE_ALIGN);
2679     ExecutableWriterHolder<LookupHolder> lookupWriterHolder(holder, sizeof(LookupHolder));
2680
2681     lookupWriterHolder.GetRW()->Initialize(holder, addrOfResolver, dispatchToken);
2682     ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2683
2684     //incr our counters
2685     stats.stub_lookup_counter++;
2686     stats.stub_space += sizeof(LookupHolder);
2687     LOG((LF_STUBS, LL_INFO10000, "GenerateLookupStub   for token" FMT_ADDR "at" FMT_ADDR "\n",
2688                                  DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
2689
2690 #ifdef FEATURE_PERFMAP
2691     PerfMap::LogStubs(__FUNCTION__, "GenerateLookupStub", (PCODE)holder->stub(), holder->stub()->size());
2692 #endif
2693
2694     RETURN (holder);
2695 }
2696
2697 //----------------------------------------------------------------------------
2698 /* Generate a cache entry
2699 */
2700 ResolveCacheElem *VirtualCallStubManager::GenerateResolveCacheElem(void *addrOfCode,
2701                                                                    void *pMTExpected,
2702                                                                    size_t token,
2703                                                                    bool *pMayHaveReenteredCooperativeGCMode)
2704 {
2705     CONTRACTL
2706     {
2707         THROWS;
2708         GC_TRIGGERS;
2709         INJECT_FAULT(COMPlusThrowOM(););
2710         PRECONDITION(pMayHaveReenteredCooperativeGCMode != nullptr);
2711         PRECONDITION(!*pMayHaveReenteredCooperativeGCMode);
2712     }
2713     CONTRACTL_END
2714
2715     CONSISTENCY_CHECK(CheckPointer(pMTExpected));
2716
2717     //allocate from the requisite heap and set the appropriate fields
2718     ResolveCacheElem *e = (ResolveCacheElem*) (void*)
2719         cache_entry_heap->AllocAlignedMem(sizeof(ResolveCacheElem), CODE_SIZE_ALIGN);
2720
2721     e->pMT    = pMTExpected;
2722     e->token  = token;
2723     e->target = addrOfCode;
2724
2725     e->pNext  = NULL;
2726
2727 #ifdef FEATURE_CODE_VERSIONING
2728     MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress((PCODE)addrOfCode);
2729     if (pMD->IsVersionableWithVtableSlotBackpatch())
2730     {
2731         pMD->RecordAndBackpatchEntryPointSlot(
2732             m_loaderAllocator,
2733             (TADDR)&e->target,
2734             EntryPointSlots::SlotType_Normal);
2735
2736         // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
2737         *pMayHaveReenteredCooperativeGCMode = true;
2738     }
2739 #endif
2740
2741     //incr our counters
2742     stats.cache_entry_counter++;
2743     stats.cache_entry_space += sizeof(ResolveCacheElem);
2744
2745     return e;
2746 }
2747
2748 //------------------------------------------------------------------
2749 // Adds the stub manager to our linked list of virtual stub managers
2750 // and adds to the global list.
2751 //------------------------------------------------------------------
2752 void VirtualCallStubManagerManager::AddStubManager(VirtualCallStubManager *pMgr)
2753 {
2754     WRAPPER_NO_CONTRACT;
2755
2756     SimpleWriteLockHolder lh(&m_RWLock);
2757
2758     pMgr->m_pNext = m_pManagers;
2759     m_pManagers = pMgr;
2760
2761     STRESS_LOG2(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
2762         "VirtualCallStubManagerManager::AddStubManager - 0x%p (vptr 0x%p)\n", pMgr, (*(PVOID*)pMgr));
2763 }
2764
2765 //------------------------------------------------------------------
2766 // Removes the stub manager from our linked list of virtual stub
2767 // managers and from the global list.
2768 //------------------------------------------------------------------
2769 void VirtualCallStubManagerManager::RemoveStubManager(VirtualCallStubManager *pMgr)
2770 {
2771     CONTRACTL
2772     {
2773         NOTHROW;
2774         GC_NOTRIGGER;
2775         MODE_ANY;
2776         CAN_TAKE_LOCK;
2777     }
2778     CONTRACTL_END;
2779
2780     SimpleWriteLockHolder lh(&m_RWLock);
2781
2782     // Remove this manager from our list.
2783     for (VirtualCallStubManager **pCur = &m_pManagers;
2784          *pCur != NULL;
2785          pCur = &((*pCur)->m_pNext))
2786     {
2787         if (*pCur == pMgr)
2788             *pCur = (*pCur)->m_pNext;
2789     }
2790
2791     // Make sure we don't have a residual pointer left over.
2792     m_pCacheElem = NULL;
2793
2794     STRESS_LOG1(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
2795         "VirtualCallStubManagerManager::RemoveStubManager - 0x%p\n", pMgr);
2796 }
2797
2798 //------------------------------------------------------------------
2799 // Logs stub usage statistics
2800 //------------------------------------------------------------------
2801 void VirtualCallStubManager::LogStats()
2802 {
2803     STATIC_CONTRACT_NOTHROW;
2804     STATIC_CONTRACT_GC_NOTRIGGER;
2805     STATIC_CONTRACT_FORBID_FAULT;
2806
2807     // Our Init routine assigns all fields atomically, so testing one field should suffice to
2808     // test whether the Init succeeded.
2809     if (!resolvers)
2810     {
2811         return;
2812     }
2813
2814     // Temp space to use for formatting the output.
2815     static const int FMT_STR_SIZE = 160;
2816     char szPrintStr[FMT_STR_SIZE];
2817     DWORD dwWriteByte;
2818
2819     if (g_hStubLogFile && (stats.site_write != 0))
2820     {
2821         //output counters
2822         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", stats.site_counter);
2823         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2824         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write", stats.site_write);
2825         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2826         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", stats.site_write_mono);
2827         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2828         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", stats.site_write_poly);
2829         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2830
2831         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\nstub data\r\n");
2832         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2833
2834         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", stats.stub_lookup_counter);
2835         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2836         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", stats.stub_mono_counter);
2837         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2838         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", stats.stub_poly_counter);
2839         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2840         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", stats.stub_space);
2841         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2842
2843         size_t total, used;
2844         g_resolveCache->GetLoadFactor(&total, &used);
2845
2846         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_SIZE, "cache_entry_used", used);
2847         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2848         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", stats.cache_entry_counter);
2849         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2850         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", stats.cache_entry_space);
2851         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2852
2853         sprintf_s(szPrintStr, ARRAY_SIZE(szPrintStr), "\r\ncache_load:\t%zu used, %zu total, utilization %#5.2f%%\r\n",
2854                 used, total, 100.0 * double(used) / double(total));
2855         WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
2856     }
2857
2858     resolvers->LogStats();
2859     dispatchers->LogStats();
2860     lookups->LogStats();
2861     vtableCallers->LogStats();
2862     cache_entries->LogStats();
2863
2864     g_site_counter += stats.site_counter;
2865     g_stub_lookup_counter += stats.stub_lookup_counter;
2866     g_stub_poly_counter += stats.stub_poly_counter;
2867     g_stub_mono_counter += stats.stub_mono_counter;
2868     g_stub_vtable_counter += stats.stub_vtable_counter;
2869     g_site_write += stats.site_write;
2870     g_site_write_poly += stats.site_write_poly;
2871     g_site_write_mono += stats.site_write_mono;
2872     g_worker_call += stats.worker_call;
2873     g_worker_call_no_patch += stats.worker_call_no_patch;
2874     g_worker_collide_to_mono += stats.worker_collide_to_mono;
2875     g_stub_space += stats.stub_space;
2876     g_cache_entry_counter += stats.cache_entry_counter;
2877     g_cache_entry_space += stats.cache_entry_space;
2878
2879     stats.site_counter = 0;
2880     stats.stub_lookup_counter = 0;
2881     stats.stub_poly_counter = 0;
2882     stats.stub_mono_counter = 0;
2883     stats.stub_vtable_counter = 0;
2884     stats.site_write = 0;
2885     stats.site_write_poly = 0;
2886     stats.site_write_mono = 0;
2887     stats.worker_call = 0;
2888     stats.worker_call_no_patch = 0;
2889     stats.worker_collide_to_mono = 0;
2890     stats.stub_space = 0;
2891     stats.cache_entry_counter = 0;
2892     stats.cache_entry_space = 0;
2893 }
2894
2895 void Prober::InitProber(size_t key1, size_t key2, size_t* table)
2896 {
2897     CONTRACTL {
2898         NOTHROW;
2899         GC_NOTRIGGER;
2900         FORBID_FAULT;
2901     } CONTRACTL_END
2902
2903     _ASSERTE(table);
2904
2905     keyA = key1;
2906     keyB = key2;
2907     base = &table[CALL_STUB_FIRST_INDEX];
2908     mask = table[CALL_STUB_MASK_INDEX];
2909     FormHash();
2910 }
2911
2912 size_t Prober::Find()
2913 {
2914     CONTRACTL {
2915         NOTHROW;
2916         GC_NOTRIGGER;
2917         FORBID_FAULT;
2918     } CONTRACTL_END
2919
2920     size_t entry;
2921     //if this prober has already visited every slot, there is nothing more to look at.
2922     //note, this means that if a prober is going to be reused, the FormHash() function
2923     //needs to be called to reset it.
2924     if (NoMore())
2925         return CALL_STUB_EMPTY_ENTRY;
2926     do
2927     {
2928         entry = Read();
2929
2930         //if we hit an empty entry, it means it cannot be in the table
2931         if(entry==CALL_STUB_EMPTY_ENTRY)
2932         {
2933             return CALL_STUB_EMPTY_ENTRY;
2934         }
2935
2936         //we have a real entry, see if it is the one we want using our comparer
2937         comparer->SetContents(entry);
2938         if (comparer->Equals(keyA, keyB))
2939         {
2940             return entry;
2941         }
2942     } while(Next()); //Next() returns false when we have visited every slot
2943     return CALL_STUB_EMPTY_ENTRY;
2944 }
2945
2946 size_t Prober::Add(size_t newEntry)
2947 {
2948     CONTRACTL {
2949         NOTHROW;
2950         GC_NOTRIGGER;
2951         FORBID_FAULT;
2952     } CONTRACTL_END
2953
2954     size_t entry;
2955     //if we have visited every slot then there is no room in the table to add this new entry
2956     if (NoMore())
2957         return CALL_STUB_EMPTY_ENTRY;
2958
2959     do
2960     {
2961         entry = Read();
2962         if (entry==CALL_STUB_EMPTY_ENTRY)
2963         {
2964             //it's not in the table and we have the correct empty slot in hand
2965             //in which to add it.
2966             //try and grab it, if we succeed we break out to add the entry
2967             //if we fail, it means a racer swooped in and wrote to
2968             //this slot, so we will just keep looking
2969             if (GrabEntry(newEntry))
2970             {
2971                 break;
2972             }
2973
2974             // We didn't grab this entry, so keep trying.
2975             continue;
2976         }
2977         //check if this entry is already in the table, if so we are done
2978         comparer->SetContents(entry);
2979         if (comparer->Equals(keyA, keyB))
2980         {
2981             return entry;
2982         }
2983     } while(Next()); //Next() returns false when we have visited every slot
2984
2985     //if we have visited every slot then there is no room in the table to add this new entry
2986     if (NoMore())
2987         return CALL_STUB_EMPTY_ENTRY;
2988
2989     CONSISTENCY_CHECK(Read() == newEntry);
2990     return newEntry;
2991 }
2992
2993 /*Atomically grab an entry, if it is empty, so we can write in it.
2994 @TODO: It is not clear if this routine is actually necessary and/or if the
2995 interlocked compare exchange is necessary as opposed to just a read write with racing allowed.
2996 If we didn't have it, all that would happen is potentially more duplicates or
2997 dropped entries, and we are supposed to run correctly even if they
2998 happen.  So in a sense this is a perf optimization, whose value has
2999 not been measured, i.e. it might be faster without it.
3000 */
3001 BOOL Prober::GrabEntry(size_t entryValue)
3002 {
3003     LIMITED_METHOD_CONTRACT;
3004
3005     return InterlockedCompareExchangeT(&base[index],
3006         entryValue, static_cast<size_t>(CALL_STUB_EMPTY_ENTRY)) == CALL_STUB_EMPTY_ENTRY;
3007 }
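
//----------------------------------------------------------------------------
// A minimal sketch of the claim idiom used by GrabEntry above: atomically
// publish a value into a slot only if the slot still holds the empty
// sentinel, so a losing racer simply keeps probing. Hypothetical names,
// std::atomic in place of InterlockedCompareExchangeT; illustrative only and
// excluded from the build.
#if 0
#include <atomic>
#include <cstddef>

static const size_t kToyEmptyEntry = 0;   // stand-in for CALL_STUB_EMPTY_ENTRY

static bool ToyTryGrabSlot(std::atomic<size_t>& slot, size_t newEntry)
{
    size_t expected = kToyEmptyEntry;
    // Succeeds only if the slot is still empty; otherwise the caller keeps probing.
    return slot.compare_exchange_strong(expected, newEntry);
}
#endif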
3008
3009 inline void FastTable::IncrementCount()
3010 {
3011     LIMITED_METHOD_CONTRACT;
3012
3013     // This MUST be an interlocked increment, since BucketTable::GetMoreSpace relies on
3014     // the return value of FastTable::isFull to tell it whether or not to continue with
3015     // trying to allocate a new FastTable. If two threads race and try to increment this
3016     // at the same time and one increment is lost, then the size will be inaccurate and
3017     // BucketTable::GetMoreSpace will never succeed, resulting in an infinite loop trying
3018     // to add a new entry.
3019     InterlockedIncrement((LONG *)&contents[CALL_STUB_COUNT_INDEX]);
3020 }
3021
3022 size_t FastTable::Add(size_t entry, Prober* probe)
3023 {
3024     CONTRACTL {
3025         NOTHROW;
3026         GC_NOTRIGGER;
3027         FORBID_FAULT;
3028     } CONTRACTL_END
3029
3030     size_t result = probe->Add(entry);
3031     if (result == entry) IncrementCount();
3032     return result;
3033 }
3034
3035 size_t FastTable::Find(Prober* probe)
3036 {
3037     WRAPPER_NO_CONTRACT;
3038
3039     return probe->Find();
3040 }
3041
3042 /*Increase the size of the bucket referenced by the prober p and copy the existing members into it.
3043 Since duplicates and lost entries are okay, we can build the larger table
3044 and then try to swap it in.  If it turns out that somebody else is racing us,
3045 the worst that will happen is we drop a few entries on the floor, which is okay.
3046 If by chance we swap out a table that somebody else is inserting an entry into, that
3047 is okay too, just another dropped entry.  If we detect dups, we just drop them on
3048 the floor. */
3049 BOOL BucketTable::GetMoreSpace(const Prober* p)
3050 {
3051     CONTRACTL {
3052         THROWS;
3053         GC_TRIGGERS;
3054         MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3055         INJECT_FAULT(COMPlusThrowOM(););
3056     } CONTRACTL_END;
3057
3058     //get ahold of the current bucket
3059     Prober probe(p->comparer);
3060     size_t index = ComputeBucketIndex(p->keyA, p->keyB);
3061
3062     FastTable* oldBucket = (FastTable*) Read(index);
3063
3064     if (!oldBucket->isFull())
3065     {
3066         return TRUE;
3067     }
3068     //make a larger bucket
3069     size_t numEntries;
3070     if (oldBucket->tableSize() == CALL_STUB_MIN_ENTRIES)
3071     {
3072         numEntries = CALL_STUB_SECONDARY_ENTRIES;
3073     }
3074     else
3075     {
3076         numEntries = oldBucket->tableSize()*CALL_STUB_GROWTH_FACTOR;
3077     }
3078
3079     FastTable* newBucket = FastTable::MakeTable(numEntries);
3080
3081     //copy via insertion from the old to the new bucket
3082     size_t* limit = &oldBucket->contents[(oldBucket->tableSize())+CALL_STUB_FIRST_INDEX];
3083     size_t* e;
3084     for (e = &oldBucket->contents[CALL_STUB_FIRST_INDEX]; e<limit; e++)
3085     {
3086         size_t moved = *e;
3087         if (moved == CALL_STUB_EMPTY_ENTRY)
3088         {
3089             continue;
3090         }
3091         probe.comparer->SetContents(moved);
3092         probe.InitProber(probe.comparer->KeyA(), probe.comparer->KeyB(), &newBucket->contents[0]);
3093         //if the new bucket fills up, give up (this should never happen I think)
3094         if (newBucket->Add(moved, &probe) == CALL_STUB_EMPTY_ENTRY)
3095         {
3096             _ASSERTE(!"This should never happen");
3097             return FALSE;
3098         }
3099     }
3100
3101     // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
3102     // replacing the entry, then we will just put the new bucket we just created in the
3103     // dead list instead of risking a race condition which would put a duplicate of the old
3104     // bucket in the dead list (and even possibly cause a cyclic list).
3105     if (InterlockedCompareExchangeT(reinterpret_cast<FastTable * volatile *>(&buckets[index]), newBucket, oldBucket) != oldBucket)
3106         oldBucket = newBucket;
3107
3108     // Link the old onto the "to be reclaimed" list.
3109     // Use the dead link field of the abandoned buckets to form the list
3110     FastTable* list;
3111     do {
3112         list = VolatileLoad(&dead);
3113         oldBucket->contents[CALL_STUB_DEAD_LINK] = (size_t) list;
3114     } while (InterlockedCompareExchangeT(&dead, oldBucket, list) != list);
3115
3116 #ifdef _DEBUG
3117     {
3118         // Validate correctness of the list
3119         FastTable *curr = oldBucket;
3120         while (curr)
3121         {
3122             FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
3123             size_t i = 0;
3124             while (next)
3125             {
3126                 next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
3127                 _ASSERTE(curr != next); // Make sure we don't have duplicates
3128                 _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
3129             }
3130             curr = next;
3131         }
3132     }
3133 #endif // _DEBUG
3134
3135     //update our counters
3136     stats.bucket_space_dead += UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3137     stats.bucket_space      -= UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3138     stats.bucket_space      += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3139     return TRUE;
3140 }
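
//----------------------------------------------------------------------------
// The tail of GetMoreSpace above retires the old bucket by pushing it onto an
// intrusive, lock-free "to be reclaimed" list through its dead-link slot.
// Below is a minimal sketch of that push; the node type and names are
// hypothetical and std::atomic stands in for the CLR interlocked helpers.
// Illustrative only and excluded from the build.
#if 0
#include <atomic>

struct ToyTable
{
    ToyTable* deadLink;   // plays the role of contents[CALL_STUB_DEAD_LINK]
};

static void ToyRetireTable(std::atomic<ToyTable*>& deadList, ToyTable* retired)
{
    ToyTable* head = deadList.load();
    do
    {
        retired->deadLink = head;       // link the retired table in front of the current head
    } while (!deadList.compare_exchange_weak(head, retired));
}
#endif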
3141
3142 void BucketTable::Reclaim()
3143 {
3144
3145     CONTRACTL
3146     {
3147         NOTHROW;
3148         GC_NOTRIGGER;
3149         FORBID_FAULT;
3150     }
3151     CONTRACTL_END
3152
3153     //reclaim the dead (abandoned) buckets on the dead list
3154     //       The key issue is to not reclaim the list if any thread is in a stub or
3155     //       if any thread is accessing (read or write) the cache tables.  So we will declare
3156     //       those points to be non-gc safe points, and reclaim when the gc syncs the threads
3157     //@TODO: add an assert to ensure we are at a gc safe point
3158     FastTable* list = dead;
3159
3160     //see if there is anything to do.
3161     //We ignore the race, since we will just pick them up on the next go around
3162     if (list == NULL) return;
3163
3164     //Try and grab the list exclusively, if we fail, it means that either somebody
3165     //else grabbed it, or something got added.  In either case we just give up and assume
3166     //we will catch it on the next go around.
3167     //we use an interlock here in case we are called during shutdown not at a gc safe point
3168     //in which case the race is between several threads wanting to reclaim.
3169     //We are assuming that actually having to do anything is rare
3170     //so that the interlocked overhead is acceptable.  If this is not true, then
3171     //we need to examine exactly how and when we may be called during shutdown.
3172     if (InterlockedCompareExchangeT(&dead, NULL, list) != list)
3173         return;
3174
3175 #ifdef _DEBUG
3176     // Validate correctness of the list
3177     FastTable *curr = list;
3178     while (curr)
3179     {
3180         FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
3181         size_t i = 0;
3182         while (next)
3183         {
3184             next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
3185             _ASSERTE(curr != next); // Make sure we don't have duplicates
3186             _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
3187         }
3188         curr = next;
3189     }
3190 #endif // _DEBUG
3191
3192     //we now have the list off by ourself, so we can just walk and cleanup
3193     while (list)
3194     {
3195         size_t next = list->contents[CALL_STUB_DEAD_LINK];
3196         delete list;
3197         list = (FastTable*) next;
3198     }
3199 }
3200
3201 //
3202 // When using SetUpProber the proper values to use for keyA, keyB are:
3203 //
3204 //                 KeyA              KeyB
3205 //-------------------------------------------------------
3206 // lookups         token     the stub calling convention
3207 // dispatchers     token     the expected MT
3208 // resolver        token     the stub calling convention
3209 // cache_entries   token     the expected method table
3210 // vtableCallers   token     unused (zero)
3211 //
3212 BOOL BucketTable::SetUpProber(size_t keyA, size_t keyB, Prober *prober)
3213 {
3214     CONTRACTL {
3215         THROWS;
3216         GC_TRIGGERS;
3217         MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3218         INJECT_FAULT(COMPlusThrowOM(););
3219     } CONTRACTL_END;
3220
3221     // The buckets[index] table starts off initialized to all CALL_STUB_EMPTY_ENTRY
3222     // and we should write each buckets[index] exactly once. However in a multi-proc
3223     // scenario each processor could see old memory values that would cause us to
3224     // leak memory.
3225     //
3226     // Since this is a fairly hot code path and it is very rare for buckets[index]
3227     // to be CALL_STUB_EMPTY_ENTRY, we can first try a non-volatile read and then
3228     // if it looks like we need to create a new FastTable we double check by doing
3229     // a volatile read.
3230     //
3231     // Note that BucketTable::GetMoreSpace also updates buckets[index] when the FastTable
3232     // grows to 90% full.  (CALL_STUB_LOAD_FACTOR is 90%)
3233
3234     size_t index = ComputeBucketIndex(keyA, keyB);
3235     size_t bucket = buckets[index];  // non-volatile read
3236     if (bucket==CALL_STUB_EMPTY_ENTRY)
3237     {
3238         bucket = Read(index);        // volatile read
3239     }
3240
3241     if (bucket==CALL_STUB_EMPTY_ENTRY)
3242     {
3243         FastTable* newBucket = FastTable::MakeTable(CALL_STUB_MIN_ENTRIES);
3244
3245         // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
3246         // replacing the entry, then we will free the new bucket we just created.
3247         bucket = InterlockedCompareExchangeT(&buckets[index], reinterpret_cast<size_t>(newBucket), static_cast<size_t>(CALL_STUB_EMPTY_ENTRY));
3248         if (bucket == CALL_STUB_EMPTY_ENTRY)
3249         {
3250             // We successfully wrote newBucket into buckets[index], overwriting the CALL_STUB_EMPTY_ENTRY value
3251             stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3252             bucket = (size_t) newBucket;
3253         }
3254         else
3255         {
3256             // Someone else wrote buckets[index] before us
3257             // and bucket contains the value that they wrote
3258             // We must free the memory that we allocated
3259             // and we will use the value that someone else wrote
3260             delete newBucket;
3261             newBucket = (FastTable*) bucket;
3262         }
3263     }
3264
3265     return ((FastTable*)(bucket))->SetUpProber(keyA, keyB, prober);
3266 }
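
//----------------------------------------------------------------------------
// SetUpProber above lazily creates a bucket at most once per slot: a cheap
// read first, then a CAS that either publishes the new bucket or reports that
// another thread won, in which case the loser frees its allocation and uses
// the winner's. A minimal sketch of that pattern follows; the names are
// hypothetical, and std::atomic approximates the non-volatile/volatile read
// plus interlocked exchange used above. Illustrative only and excluded from
// the build.
#if 0
#include <atomic>

struct ToyBucket { /* ... entries ... */ };

static ToyBucket* ToyGetOrCreateBucket(std::atomic<ToyBucket*>& slot)
{
    ToyBucket* existing = slot.load(std::memory_order_relaxed);
    if (existing != nullptr)
        return existing;                // hot path: bucket already published

    ToyBucket* fresh = new ToyBucket();
    if (slot.compare_exchange_strong(existing, fresh))
        return fresh;                   // we published it

    delete fresh;                       // lost the race; 'existing' now holds the winner's bucket
    return existing;
}
#endif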
3267
3268 size_t BucketTable::Add(size_t entry, Prober* probe)
3269 {
3270     CONTRACTL {
3271         THROWS;
3272         GC_TRIGGERS;
3273         MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3274         INJECT_FAULT(COMPlusThrowOM(););
3275     } CONTRACTL_END
3276
3277     FastTable* table = (FastTable*)(probe->items());
3278     size_t result = table->Add(entry,probe);
3279     if (result != CALL_STUB_EMPTY_ENTRY)
3280     {
3281         return result;
3282     }
3283     //we must have missed count(s) and the table is now full, so let's
3284     //grow and retry (this should be rare)
3285     if (!GetMoreSpace(probe)) return CALL_STUB_EMPTY_ENTRY;
3286     if (!SetUpProber(probe->keyA, probe->keyB, probe)) return CALL_STUB_EMPTY_ENTRY;
3287     return Add(entry, probe);  //recurse in for the retry to write the entry
3288 }
3289
3290 void BucketTable::LogStats()
3291 {
3292     LIMITED_METHOD_CONTRACT;
3293
3294     // Update stats
3295     g_bucket_space += stats.bucket_space;
3296     g_bucket_space_dead += stats.bucket_space_dead;
3297
3298     stats.bucket_space      = 0;
3299     stats.bucket_space_dead = 0;
3300 }
3301
3302 DispatchCache::DispatchCache()
3303 #ifdef CHAIN_LOOKUP
3304     : m_writeLock(CrstStubDispatchCache, CRST_UNSAFE_ANYMODE)
3305 #endif
3306 {
3307     CONTRACTL
3308     {
3309         THROWS;
3310         GC_TRIGGERS;
3311         INJECT_FAULT(COMPlusThrowOM());
3312     }
3313     CONTRACTL_END
3314
3315     //initialize the cache to be empty, i.e. all slots point to the empty entry
3316     ResolveCacheElem* e = new ResolveCacheElem();
3317     e->pMT = (void *) (-1); //force all method tables to be misses
3318     e->pNext = NULL; // null terminate the chain for the empty entry
3319     empty = e;
3320     for (int i = 0;i<CALL_STUB_CACHE_SIZE;i++)
3321         ClearCacheEntry(i);
3322
3323     // Initialize statistics
3324     memset(&stats, 0, sizeof(stats));
3325 #ifdef STUB_LOGGING
3326     memset(&cacheData, 0, sizeof(cacheData));
3327 #endif
3328 }
3329
3330 ResolveCacheElem* DispatchCache::Lookup(size_t token, UINT16 tokenHash, void* mt)
3331 {
3332     WRAPPER_NO_CONTRACT;
3333     if (tokenHash == INVALID_HASH)
3334         tokenHash = HashToken(token);
3335     UINT16 idx = HashMT(tokenHash, mt);
3336     ResolveCacheElem *pCurElem = GetCacheEntry(idx);
3337
3338 #if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3339     BOOL chainedLookup = FALSE;
3340 #endif
3341     // No need to conditionalize on CHAIN_LOOKUP, since this loop
3342     // will only run once when CHAIN_LOOKUP is undefined, since
3343     // there will only ever be one element in a bucket (chain of 1).
3344     while (pCurElem != empty) {
3345         if (pCurElem->Equals(token, mt)) {
3346             return pCurElem;
3347         }
3348 #if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3349         // Only want to inc the counter once per chain search.
3350         if (pCurElem == GetCacheEntry(idx)) {
3351             chainedLookup = TRUE;
3352             g_chained_lookup_external_call_counter++;
3353         }
3354 #endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3355         pCurElem = pCurElem->Next();
3356     }
3357 #if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3358     if (chainedLookup) {
3359         g_chained_lookup_external_miss_counter++;
3360     }
3361 #endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3362     return NULL; /* with chain lookup disabled this returns NULL */
3363 }
3364
3365 // returns true if we wrote the resolver cache entry with the new elem
3366 //    also returns true if the cache entry already contained elem (the miss case)
3367 //
3368 BOOL DispatchCache::Insert(ResolveCacheElem* elem, InsertKind insertKind)
3369 {
3370     CONTRACTL {
3371         THROWS;
3372         GC_TRIGGERS;
3373         FORBID_FAULT;
3374         PRECONDITION(insertKind != IK_NONE);
3375     } CONTRACTL_END;
3376
3377 #ifdef CHAIN_LOOKUP
3378     CrstHolder lh(&m_writeLock);
3379 #endif
3380
3381     // Figure out what bucket this element belongs in
3382     UINT16 tokHash = HashToken(elem->token);
3383     UINT16 hash    = HashMT(tokHash, elem->pMT);
3384     UINT16 idx     = hash;
3385     BOOL   write   = FALSE;
3386     BOOL   miss    = FALSE;
3387     BOOL   hit     = FALSE;
3388     BOOL   collide = FALSE;
3389
3390 #ifdef _DEBUG
3391     elem->debug_hash = tokHash;
3392     elem->debug_index = idx;
3393 #endif // _DEBUG
3394
3395     ResolveCacheElem* cell = GetCacheEntry(idx);
3396
3397 #ifdef CHAIN_LOOKUP
3398     // There is the possibility of a race where two threads try to
3399     // generate a ResolveCacheElem for the same tuple. The first
3400     // thread gets the lock and inserts the element; the second
3401     // thread, coming in later, should detect this and not re-add
3402     // the element, since it is likely already at the start of the
3403     // list, and re-adding it would make the element loop back to
3404     // itself.
3405     if (Lookup(elem->token, tokHash, elem->pMT))
3406 #else // !CHAIN_LOOKUP
3407     if (cell == elem)
3408 #endif // !CHAIN_LOOKUP
3409     {
3410         miss  = TRUE;
3411         write = FALSE;
3412     }
3413     else
3414     {
3415         if (cell == empty)
3416         {
3417             hit   = TRUE;
3418             write = TRUE;
3419         }
3420     }
3421     CONSISTENCY_CHECK(!(hit && miss));
3422
3423     // If we didn't have a miss or a hit then we had a collision with
3424     // a non-empty entry in our resolver cache
3425     if (!hit && !miss)
3426     {
3427         collide = TRUE;
3428
3429 #ifdef CHAIN_LOOKUP
3430         // Always insert the entry into the chain
3431         write = TRUE;
3432 #else // !CHAIN_LOOKUP
3433
3434         if (STUB_COLLIDE_WRITE_PCT < 100)
3435         {
3436             UINT32 coin = UINT32(GetRandomInt(100));
3437
3438             write = (coin < STUB_COLLIDE_WRITE_PCT);
3439         }
3440         else
3441         {
3442             write = TRUE;
3443         }
3444
3445 #endif // !CHAIN_LOOKUP
3446     }
3447
3448     if (write)
3449     {
3450 #ifdef CHAIN_LOOKUP
3451         // We create a list with the last pNext pointing at empty
3452         elem->pNext = cell;
3453 #else // !CHAIN_LOOKUP
3454         elem->pNext = empty;
3455 #endif // !CHAIN_LOOKUP
3456         SetCacheEntry(idx, elem);
3457         stats.insert_cache_write++;
3458     }
3459
3460     LOG((LF_STUBS, LL_INFO1000, "%8s Insert(token" FMT_ADDR "MethodTable" FMT_ADDR ") at [%03x] %7s %5s \n",
3461          (insertKind == IK_DISPATCH) ? "Dispatch" : (insertKind == IK_RESOLVE) ? "Resolve" : "External",
3462          DBG_ADDR(elem->token), DBG_ADDR(elem->pMT), hash,
3463          hit ? "HIT" : miss ? "MISS" : "COLLIDE", write ? "WRITE" : "KEEP"));
3464
3465     if (insertKind == IK_DISPATCH)
3466         stats.insert_cache_dispatch++;
3467     else if (insertKind == IK_RESOLVE)
3468         stats.insert_cache_resolve++;
3469     else if (insertKind == IK_SHARED)
3470         stats.insert_cache_shared++;
3471     else if (insertKind == IK_EXTERNAL)
3472         stats.insert_cache_external++;
3473
3474     if (hit)
3475         stats.insert_cache_hit++;
3476     else if (miss)
3477         stats.insert_cache_miss++;
3478     else if (collide)
3479         stats.insert_cache_collide++;
3480
3481     return write || miss;
3482 }
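
//----------------------------------------------------------------------------
// When chaining is disabled, a collision above only overwrites the existing
// cache entry with probability STUB_COLLIDE_WRITE_PCT. A minimal sketch of
// that randomized-replacement policy (hypothetical names and RNG):
// illustrative only and excluded from the build.
#if 0
#include <cstdlib>

static bool ToyShouldOverwriteOnCollision(unsigned writePct /* 0..100 */)
{
    if (writePct >= 100)
        return true;                       // always replace
    unsigned coin = (unsigned)(rand() % 100);
    return coin < writePct;                // replace writePct% of the time
}
#endif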
3483
3484 #ifdef CHAIN_LOOKUP
3485 void DispatchCache::PromoteChainEntry(ResolveCacheElem* elem)
3486 {
3487     CONTRACTL {
3488         NOTHROW;
3489         GC_NOTRIGGER;
3490         FORBID_FAULT;
3491     } CONTRACTL_END;
3492
3493     CrstHolder lh(&m_writeLock);
3494     g_chained_entry_promoted++;
3495
3496     // Figure out what bucket this element belongs in
3497     UINT16 tokHash = HashToken(elem->token);
3498     UINT16 hash    = HashMT(tokHash, elem->pMT);
3499     UINT16 idx     = hash;
3500
3501     ResolveCacheElem *curElem = GetCacheEntry(idx);
3502
3503     // If someone raced in and promoted this element before us,
3504     // then we can just return. Furthermore, it would be an
3505     // error if we performed the below code, since we'd end up
3506     // with a self-referential element and an infinite loop.
3507     if (curElem == elem)
3508     {
3509         return;
3510     }
3511
3512     // Now loop through the chain to find the element that
3513     // points to the element we're promoting, so we can remove
3514     // it from the chain.
3515     while (curElem->Next() != elem)
3516     {
3517         curElem = curElem->pNext;
3518         CONSISTENCY_CHECK(curElem != NULL);
3519     }
3520
3521     // Remove the element from the chain
3522     CONSISTENCY_CHECK(curElem->pNext == elem);
3523     curElem->pNext = elem->pNext;
3524
3525     // Set the promoted entry to the head of the list.
3526     elem->pNext = GetCacheEntry(idx);
3527     SetCacheEntry(idx, elem);
3528 }
3529 #endif // CHAIN_LOOKUP
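
//----------------------------------------------------------------------------
// PromoteChainEntry above is a classic move-to-front on a singly linked
// chain, performed while holding the cache write lock. A minimal
// single-threaded sketch of the same manipulation (hypothetical node type,
// no locking shown); illustrative only and excluded from the build.
#if 0
struct ToyNode { ToyNode* next; };

static void ToyMoveToFront(ToyNode*& head, ToyNode* elem)
{
    if (head == elem)
        return;                         // already promoted

    ToyNode* prev = head;
    while (prev->next != elem)          // find the node that points at elem
        prev = prev->next;

    prev->next = elem->next;            // unlink elem from its current position
    elem->next = head;                  // relink it at the head of the chain
    head = elem;
}
#endif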
3530
3531 void DispatchCache::LogStats()
3532 {
3533     LIMITED_METHOD_CONTRACT;
3534
3535     g_insert_cache_external += stats.insert_cache_external;
3536     g_insert_cache_shared   += stats.insert_cache_shared;
3537     g_insert_cache_dispatch += stats.insert_cache_dispatch;
3538     g_insert_cache_resolve  += stats.insert_cache_resolve;
3539     g_insert_cache_hit      += stats.insert_cache_hit;
3540     g_insert_cache_miss     += stats.insert_cache_miss;
3541     g_insert_cache_collide  += stats.insert_cache_collide;
3542     g_insert_cache_write    += stats.insert_cache_write;
3543
3544     stats.insert_cache_external = 0;
3545     stats.insert_cache_shared = 0;
3546     stats.insert_cache_dispatch = 0;
3547     stats.insert_cache_resolve = 0;
3548     stats.insert_cache_hit = 0;
3549     stats.insert_cache_miss = 0;
3550     stats.insert_cache_collide = 0;
3551     stats.insert_cache_write = 0;
3552 }
3553
3554 /* The following table has entries with the following properties:
3555    1. Each entry has 12 bits, with 5, 6, or 7 one bits and 5, 6, or 7 zero bits.
3556    2. For each bit position, roughly half the entries have a one and half have a zero.
3557    3. Adjacent entries, when XORed, differ in 5, 6, or 7 bits.
3558 */
3559 #ifdef HOST_64BIT
3560 static const UINT16 tokenHashBits[64] =
3561 #else // !HOST_64BIT
3562 static const UINT16 tokenHashBits[32] =
3563 #endif // !HOST_64BIT
3564 {
3565     0xcd5, 0x8b9, 0x875, 0x439,
3566     0xbf0, 0x38d, 0xa5b, 0x6a7,
3567     0x78a, 0x9c8, 0xee2, 0x3d3,
3568     0xd94, 0x54e, 0x698, 0xa6a,
3569     0x753, 0x932, 0x4b7, 0x155,
3570     0x3a7, 0x9c8, 0x4e9, 0xe0b,
3571     0xf05, 0x994, 0x472, 0x626,
3572     0x15c, 0x3a8, 0x56e, 0xe2d,
3573
3574 #ifdef HOST_64BIT
3575     0xe3c, 0xbe2, 0x58e, 0x0f3,
3576     0x54d, 0x70f, 0xf88, 0xe2b,
3577     0x353, 0x153, 0x4a5, 0x943,
3578     0xaf2, 0x88f, 0x72e, 0x978,
3579     0xa13, 0xa0b, 0xc3c, 0xb72,
3580     0x0f7, 0x49a, 0xdd0, 0x366,
3581     0xd84, 0xba5, 0x4c5, 0x6bc,
3582     0x8ec, 0x0b9, 0x617, 0x85c,
3583 #endif // HOST_64BIT
3584 };
3585
3586 /*static*/ UINT16 DispatchCache::HashToken(size_t token)
3587 {
3588     LIMITED_METHOD_CONTRACT;
3589
3590     UINT16 hash  = 0;
3591     int    index = 0;
3592
3593     // Note if you change the number of bits in CALL_STUB_CACHE_NUM_BITS
3594     // then we have to recompute the hash function
3595     // Though making the number of bits smaller should still be OK
3596     static_assert_no_msg(CALL_STUB_CACHE_NUM_BITS <= 12);
3597
3598     while (token)
3599     {
3600         if (token & 1)
3601             hash ^= tokenHashBits[index];
3602
3603         index++;
3604         token >>= 1;
3605     }
3606     _ASSERTE((hash & ~CALL_STUB_CACHE_MASK) == 0);
3607     return hash;
3608 }
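
//----------------------------------------------------------------------------
// Worked example of the XOR-folding scheme in HashToken above, scaled down to
// a toy 4-entry table of made-up 4-bit values: a token with bits 0, 1 and 3
// set hashes to bits[0] ^ bits[1] ^ bits[3]. Hypothetical names; illustrative
// only and excluded from the build.
#if 0
#include <cstddef>
#include <cstdint>

static uint16_t ToyHashToken(size_t token)   // token assumed to fit in 4 bits
{
    static const uint16_t bits[4] = { 0x9, 0x5, 0xC, 0x3 };
    uint16_t hash = 0;
    for (int index = 0; token != 0; token >>= 1, index++)
    {
        if (token & 1)
            hash ^= bits[index];
    }
    return hash;    // e.g. ToyHashToken(0xB /*0b1011*/) == 0x9 ^ 0x5 ^ 0x3 == 0xF
}
#endif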
3609
3610 /////////////////////////////////////////////////////////////////////////////////////////////
3611 DispatchCache::Iterator::Iterator(DispatchCache *pCache) : m_pCache(pCache), m_curBucket(-1)
3612 {
3613     CONTRACTL {
3614         NOTHROW;
3615         GC_NOTRIGGER;
3616         PRECONDITION(CheckPointer(pCache));
3617     } CONTRACTL_END;
3618
3619     // Move to the first valid entry
3620     NextValidBucket();
3621 }
3622
3623 /////////////////////////////////////////////////////////////////////////////////////////////
3624 void DispatchCache::Iterator::Next()
3625 {
3626     CONTRACTL {
3627         NOTHROW;
3628         GC_NOTRIGGER;
3629     } CONTRACTL_END;
3630
3631     if (!IsValid()) {
3632         return;
3633     }
3634
3635     // Move to the next element in the chain
3636     m_ppCurElem = &((*m_ppCurElem)->pNext);
3637
3638     // If the next element was the empty sentinel entry, move to the next valid bucket.
3639     if (*m_ppCurElem == m_pCache->empty) {
3640         NextValidBucket();
3641     }
3642 }
3643
3644 /////////////////////////////////////////////////////////////////////////////////////////////
3645 // This doesn't actually delete the entry, it just unlinks it from the chain.
3646 // Returns the unlinked entry.
3647 ResolveCacheElem *DispatchCache::Iterator::UnlinkEntry()
3648 {
3649     CONTRACTL {
3650         NOTHROW;
3651         GC_NOTRIGGER;
3652         CONSISTENCY_CHECK(IsValid());
3653     } CONTRACTL_END;
3654     ResolveCacheElem *pUnlinkedEntry = *m_ppCurElem;
3655     *m_ppCurElem = (*m_ppCurElem)->pNext;
3656     pUnlinkedEntry->pNext = m_pCache->empty;
3657     // If unlinking this entry took us to the end of this bucket, need to move to the next.
3658     if (*m_ppCurElem == m_pCache->empty) {
3659         NextValidBucket();
3660     }
3661     return pUnlinkedEntry;
3662 }
3663
3664 /////////////////////////////////////////////////////////////////////////////////////////////
3665 void DispatchCache::Iterator::NextValidBucket()
3666 {
3667     CONTRACTL {
3668         NOTHROW;
3669         GC_NOTRIGGER;
3670         CONSISTENCY_CHECK(IsValid());
3671     } CONTRACTL_END;
3672
3673     // Move to the next bucket that contains a cache entry
3674     do {
3675         NextBucket();
3676     } while (IsValid() && *m_ppCurElem == m_pCache->empty);
3677 }
3678
3679 #endif // !DACCESS_COMPILE
3680
3681 /////////////////////////////////////////////////////////////////////////////////////////////
3682 VirtualCallStubManager *VirtualCallStubManagerManager::FindVirtualCallStubManager(PCODE stubAddress)
3683 {
3684     CONTRACTL {
3685         NOTHROW;
3686         GC_NOTRIGGER;
3687     } CONTRACTL_END;
3688
3689     SUPPORTS_DAC;
3690
3691     return VirtualCallStubManager::FindStubManager(stubAddress);
3692 }
3693
3694 static VirtualCallStubManager * const IT_START = (VirtualCallStubManager *)(-1);
3695
3696 /////////////////////////////////////////////////////////////////////////////////////////////
3697 // Move to the next element. Iterators are created at
3698 // start-1, so must call Next before using Current
3699 BOOL VirtualCallStubManagerIterator::Next()
3700 {
3701     LIMITED_METHOD_DAC_CONTRACT;
3702
3703     if (m_fIsStart)
3704     {
3705         m_fIsStart = FALSE;
3706     }
3707     else if (m_pCurMgr != NULL)
3708     {
3709         m_pCurMgr = m_pCurMgr->m_pNext;
3710     }
3711
3712     return (m_pCurMgr != NULL);
3713 }
3714
3715 /////////////////////////////////////////////////////////////////////////////////////////////
3716 // Get the current contents of the iterator
3717 VirtualCallStubManager *VirtualCallStubManagerIterator::Current()
3718 {
3719     LIMITED_METHOD_DAC_CONTRACT;
3720     CONSISTENCY_CHECK(!m_fIsStart);
3721     CONSISTENCY_CHECK(CheckPointer(m_pCurMgr));
3722
3723     return m_pCurMgr;
3724 }
3725
3726 #ifndef DACCESS_COMPILE
3727 /////////////////////////////////////////////////////////////////////////////////////////////
3728 VirtualCallStubManagerManager::VirtualCallStubManagerManager()
3729     : m_pManagers(NULL),
3730       m_pCacheElem(NULL),
3731       m_RWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT)
3732 {
3733     LIMITED_METHOD_CONTRACT;
3734 }
3735
3736 /////////////////////////////////////////////////////////////////////////////////////////////
3737 /* static */
3738 void VirtualCallStubManagerManager::InitStatic()
3739 {
3740     STANDARD_VM_CONTRACT;
3741
3742     CONSISTENCY_CHECK(g_pManager == NULL);
3743     g_pManager = new VirtualCallStubManagerManager();
3744 }
3745 #endif
3746
3747 /////////////////////////////////////////////////////////////////////////////////////////////
3748 VirtualCallStubManagerIterator VirtualCallStubManagerManager::IterateVirtualCallStubManagers()
3749 {
3750     WRAPPER_NO_CONTRACT;
3751     SUPPORTS_DAC;
3752
3753     VirtualCallStubManagerIterator it(VirtualCallStubManagerManager::GlobalManager());
3754     return it;
3755 }
3756
3757 /////////////////////////////////////////////////////////////////////////////////////////////
3758 BOOL VirtualCallStubManagerManager::CheckIsStub_Internal(
3759                     PCODE stubStartAddress)
3760 {
3761     WRAPPER_NO_CONTRACT;
3762     SUPPORTS_DAC;
3763
3764     // Forwarded to from RangeSectionStubManager
3765     return FALSE;
3766 }
3767
3768 /////////////////////////////////////////////////////////////////////////////////////////////
3769 BOOL VirtualCallStubManagerManager::DoTraceStub(
3770                     PCODE stubStartAddress,
3771                     TraceDestination *trace)
3772 {
3773     WRAPPER_NO_CONTRACT;
3774
3775     // Find the owning manager. We should succeed, since presumably someone already
3776     // called CheckIsStub on us to find out that we own the address, and already
3777     // called TraceManager to initiate a trace.
3778     VirtualCallStubManager *pMgr = FindVirtualCallStubManager(stubStartAddress);
3779     CONSISTENCY_CHECK(CheckPointer(pMgr));
3780
3781     return pMgr->DoTraceStub(stubStartAddress, trace);
3782 }
3783
3784 #ifndef DACCESS_COMPILE
3785 /////////////////////////////////////////////////////////////////////////////////////////////
3786 MethodDesc *VirtualCallStubManagerManager::Entry2MethodDesc(
3787                     PCODE stubStartAddress,
3788                     MethodTable *pMT)
3789 {
3790     CONTRACTL
3791     {
3792         THROWS;
3793         GC_TRIGGERS;
3794         INJECT_FAULT(COMPlusThrowOM(););
3795     }
3796     CONTRACTL_END
3797
3798     if (pMT == NULL)
3799         return NULL;
3800
3801     StubCodeBlockKind sk = STUB_CODE_BLOCK_UNKNOWN;
3802
3803     // Find the owning manager.
3804     VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(stubStartAddress,  &sk);
3805     if (pMgr == NULL)
3806         return NULL;
3807
3808     // Do the full resolve
3809     DispatchToken token(VirtualCallStubManager::GetTokenFromStubQuick(pMgr, stubStartAddress, sk));
3810
3811     PCODE target = NULL;
3812     // TODO: passing NULL as protectedObj here can lead to incorrect behavior for ICastable objects
3813     // We need to review if this is the case and refactor this code if we want ICastable to become officially supported
3814     VirtualCallStubManager::Resolver(pMT, token, NULL, &target, TRUE /* throwOnConflict */);
3815
3816     return pMT->GetMethodDescForSlotAddress(target);
3817 }
3818 #endif
3819
3820 #ifdef DACCESS_COMPILE
3821 void VirtualCallStubManagerManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
3822 {
3823     SUPPORTS_DAC;
3824     WRAPPER_NO_CONTRACT;
3825     VirtualCallStubManagerIterator it = IterateVirtualCallStubManagers();
3826     while (it.Next())
3827     {
3828         it.Current()->DoEnumMemoryRegions(flags);
3829     }
3830 }
3831 #endif
3832
3833 //----------------------------------------------------------------------------
3834 BOOL VirtualCallStubManagerManager::TraceManager(
3835                     Thread *thread, TraceDestination *trace,
3836                     T_CONTEXT *pContext, BYTE **pRetAddr)
3837 {
3838     WRAPPER_NO_CONTRACT;
3839
3840     // Find the owning manager. We should succeed, since presumably someone already
3841     // called CheckIsStub on us to find out that we own the address.
3842     VirtualCallStubManager *pMgr = FindVirtualCallStubManager(GetIP(pContext));
3843     CONSISTENCY_CHECK(CheckPointer(pMgr));
3844
3845     // Forward the call to the appropriate manager.
3846     return pMgr->TraceManager(thread, trace, pContext, pRetAddr);
3847 }