// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#include "stdafx.h"                     // Precompiled header key.
#include "loaderheap.h"
#include "ex.h"
#include "pedecoder.h"
#define DONOT_DEFINE_ETW_CALLBACK
#include "eventtracebase.h"

#define LHF_EXECUTABLE  0x1

#ifndef DACCESS_COMPILE

INDEBUG(DWORD UnlockedLoaderHeap::s_dwNumInstancesOfLoaderHeaps = 0;)

#ifdef RANDOMIZE_ALLOC
#include <time.h>
static class Random
{
public:
    Random() { seed = (unsigned int)time(NULL); }
    unsigned int Next()
    {
        return ((seed = seed * 214013L + 2531011L) >> 16) & 0x7fff;
    }
private:
    unsigned int seed;
} s_random;
#endif

namespace
{
#if !defined(SELF_NO_HOST) // ETW available only in the runtime
    inline void EtwAllocRequest(UnlockedLoaderHeap * const pHeap, void* ptr, size_t dwSize)
    {
        FireEtwAllocRequest(pHeap, ptr, static_cast<unsigned int>(dwSize), 0, 0, GetClrInstanceId());
    }
#else
#define EtwAllocRequest(pHeap, ptr, dwSize) ((void)0)
#endif // SELF_NO_HOST
}

//
// RangeLists are constructed so they can be searched from multiple
// threads without locking.  They do require locking in order to
// be safely modified, though.
//
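//
// A minimal usage sketch (illustrative only; the lock name is hypothetical):
// writers serialize AddRange/RemoveRanges with their own lock, while readers
// may call IsInRange concurrently without taking one.
//
//     // writer, under some external lock (e.g. a CrstHolder on a heap-owned Crst):
//     pRangeList->AddRange(pStart, pEnd, pHeap);
//
//     // reader, lock-free:
//     if (pRangeList->IsInRange((TADDR)addr))
//         ...
//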

RangeList::RangeList()
{
    WRAPPER_NO_CONTRACT;

    InitBlock(&m_starterBlock);

    m_firstEmptyBlock = &m_starterBlock;
    m_firstEmptyRange = 0;
}

RangeList::~RangeList()
{
    LIMITED_METHOD_CONTRACT;

    RangeListBlock *b = m_starterBlock.next;

    while (b != NULL)
    {
        RangeListBlock *bNext = b->next;
        delete b;
        b = bNext;
    }
}

void RangeList::InitBlock(RangeListBlock *b)
{
    LIMITED_METHOD_CONTRACT;

    Range *r = b->ranges;
    Range *rEnd = r + RANGE_COUNT;
    while (r < rEnd)
        r++->id = NULL;

    b->next = NULL;
}

BOOL RangeList::AddRangeWorker(const BYTE *start, const BYTE *end, void *id)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        INJECT_FAULT(return FALSE;);
    }
    CONTRACTL_END

    _ASSERTE(id != NULL);

    RangeListBlock *b = m_firstEmptyBlock;
    Range *r = b->ranges + m_firstEmptyRange;
    Range *rEnd = b->ranges + RANGE_COUNT;

    while (TRUE)
    {
        while (r < rEnd)
        {
            if (r->id == NULL)
            {
                r->start = (TADDR)start;
                r->end = (TADDR)end;
                r->id = (TADDR)id;

                r++;

                m_firstEmptyBlock = b;
                m_firstEmptyRange = r - b->ranges;

                return TRUE;
            }
            r++;
        }

        //
        // If there are no more blocks, allocate a
        // new one.
        //

        if (b->next == NULL)
        {
            RangeListBlock *newBlock = new (nothrow) RangeListBlock;

            if (newBlock == NULL)
            {
                m_firstEmptyBlock = b;
                m_firstEmptyRange = r - b->ranges;
                return FALSE;
            }

            InitBlock(newBlock);

            newBlock->next = NULL;
            b->next = newBlock;
        }

        //
        // Next block
        //

        b = b->next;
        r = b->ranges;
        rEnd = r + RANGE_COUNT;
    }
}

void RangeList::RemoveRangesWorker(void *id, const BYTE* start, const BYTE* end)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    }
    CONTRACTL_END

    RangeListBlock *b = &m_starterBlock;
    Range *r = b->ranges;
    Range *rEnd = r + RANGE_COUNT;

    //
    // Walk all blocks, clearing any matching ranges.
    //

    while (TRUE)
    {
        //
        // Clear entries in this block.
        //

        while (r < rEnd)
        {
            if (r->id != NULL)
            {
                if (start != NULL)
                {
                    _ASSERTE(end != NULL);

                    if (r->start >= (TADDR)start && r->start < (TADDR)end)
                    {
                        CONSISTENCY_CHECK_MSGF(r->end >= (TADDR)start &&
                                               r->end <= (TADDR)end,
                                               ("r: %p start: %p end: %p", r, start, end));
                        r->id = NULL;
                    }
                }
                else if (r->id == (TADDR)id)
                {
                    r->id = NULL;
                }
            }

            r++;
        }

        //
        // If there are no more blocks, we're done.
        //

        if (b->next == NULL)
        {
            m_firstEmptyRange = 0;
            m_firstEmptyBlock = &m_starterBlock;

            return;
        }

        //
        // Next block.
        //

        b = b->next;
        r = b->ranges;
        rEnd = r + RANGE_COUNT;
    }
}

#endif // #ifndef DACCESS_COMPILE

BOOL RangeList::IsInRangeWorker(TADDR address, TADDR *pID /* = NULL */)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        FORBID_FAULT;
        GC_NOTRIGGER;
    }
    CONTRACTL_END

    SUPPORTS_DAC;

    RangeListBlock* b = &m_starterBlock;
    Range* r = b->ranges;
    Range* rEnd = r + RANGE_COUNT;

    //
    // Look for a matching element
    //

    while (TRUE)
    {
        while (r < rEnd)
        {
            if (r->id != NULL &&
                address >= r->start &&
                address < r->end)
            {
                if (pID != NULL)
                {
                    *pID = r->id;
                }
                return TRUE;
            }
            r++;
        }

        //
        // If there are no more blocks, we're done.
        //

        if (b->next == NULL)
            return FALSE;

        //
        // Next block.
        //

        b = b->next;
        r = b->ranges;
        rEnd = r + RANGE_COUNT;
    }
}

#ifdef DACCESS_COMPILE

void
RangeList::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    SUPPORTS_DAC;
    WRAPPER_NO_CONTRACT;

    // This class is almost always contained in something
    // else so there's no enumeration of 'this'.

    RangeListBlock* block = &m_starterBlock;
    block->EnumMemoryRegions(flags);

    while (block->next.IsValid())
    {
        block->next.EnumMem();
        block = block->next;

        block->EnumMemoryRegions(flags);
    }
}

void
RangeList::RangeListBlock::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    WRAPPER_NO_CONTRACT;

    Range*          range;
    TADDR           BADFOOD;
    TSIZE_T         size;
    int             i;

    // The code below iterates over each range stored in the RangeListBlock and
    // dumps the memory region represented by each range. That is too much memory
    // for a mini-dump or triage dump, so we just bail out for those.
    if (flags == CLRDATA_ENUM_MEM_MINI || flags == CLRDATA_ENUM_MEM_TRIAGE)
    {
        return;
    }

    WIN64_ONLY( BADFOOD = 0xbaadf00dbaadf00d; );
    NOT_WIN64(  BADFOOD = 0xbaadf00d;         );

    for (i=0; i<RANGE_COUNT; i++)
    {
        range = &(this->ranges[i]);
        if (range->id == NULL || range->start == NULL || range->end == NULL ||
            // just looking at the lower 4 bytes is good enough on WIN64
            range->start == BADFOOD || range->end == BADFOOD)
        {
            break;
        }

        size = range->end - range->start;
        _ASSERTE( size < ULONG_MAX );    // ranges should be less than 4gig!

        // We can't be sure this entire range is mapped.  For example, the code:StubLinkStubManager
        // keeps track of all ranges in the code:BaseDomain::m_pStubHeap LoaderHeap, and
        // code:LoaderHeap::UnlockedReservePages adds a range for the entire reserved region, instead
        // of updating the RangeList when pages are committed.  But in that case, the committed region of
        // memory will be enumerated by the LoaderHeap anyway, so it's OK if this fails
        DacEnumMemoryRegion(range->start, size, false);
    }
}

#endif // #ifdef DACCESS_COMPILE


//=====================================================================================
// In DEBUG builds only, we tag live blocks with the requested size and the type of
// allocation (AllocMem, AllocAlignedMem, AllocateOntoReservedMem). This is strictly
// to validate that those who call Backout* are passing in the right values.
//
// For simplicity, we'll use one LoaderHeapValidationTag structure for all types even
// though not all fields are applicable to all types.
//=====================================================================================
#ifdef _DEBUG
enum AllocationType
{
    kAllocMem = 1,
    kFreedMem = 4,
};

struct LoaderHeapValidationTag
{
    size_t         m_dwRequestedSize;      // What the caller requested (not what was actually allocated)
    AllocationType m_allocationType;       // Which api allocated this block.
    const char *   m_szFile;               // Who allocated me
    int            m_lineNum;              // Who allocated me

};
#endif //_DEBUG
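
// A minimal sketch (illustrative; DEBUG builds only) of how the tag is used:
// AllocMem stamps a LoaderHeapValidationTag just past the caller-visible bytes,
// and UnlockedBackoutMem later locates it with AllocMem_GetTag and checks that
// the caller's size and allocation type match what was recorded:
//
//     LoaderHeapValidationTag *pTag = AllocMem_GetTag(pMem, dwRequestedSize);
//     _ASSERTE(pTag->m_dwRequestedSize == dwRequestedSize &&
//              pTag->m_allocationType  == kAllocMem);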

//=====================================================================================
// These classes do detailed loaderheap sniffing to help in debugging heap crashes
//=====================================================================================
#ifdef _DEBUG

// This structure logs the results of an Alloc or Free call. They are stored in reverse time order
// with UnlockedLoaderHeap::m_pEventList pointing to the most recent event.
struct LoaderHeapEvent
{
    LoaderHeapEvent *m_pNext;
    AllocationType   m_allocationType;      //Which api was called
    const char      *m_szFile;              //Caller Id
    int              m_lineNum;             //Caller Id
    const char      *m_szAllocFile;         //(BackoutEvents): Who allocated the block?
    int              m_allocLineNum;        //(BackoutEvents): Who allocated the block?
    void            *m_pMem;                //Starting address of block
    size_t           m_dwRequestedSize;     //Requested size of block
    size_t           m_dwSize;              //Actual size of block (including validation tags, padding, everything)


    void Describe(SString *pSString)
    {
        CONTRACTL
        {
            INSTANCE_CHECK;
            DISABLED(NOTHROW);
            GC_NOTRIGGER;
        }
        CONTRACTL_END

        pSString->AppendASCII("\n");

        {
            StackSString buf;
            if (m_allocationType == kFreedMem)
            {
                buf.Printf("    Freed at:         %s (line %d)\n", m_szFile, m_lineNum);
                buf.Printf("       (block originally allocated at %s (line %d)\n", m_szAllocFile, m_allocLineNum);
            }
            else
            {
                buf.Printf("    Allocated at:     %s (line %d)\n", m_szFile, m_lineNum);
            }
            pSString->Append(buf);
        }

        if (!QuietValidate())
        {
            pSString->AppendASCII("    *** THIS BLOCK HAS BEEN CORRUPTED ***\n");
        }

        {
            StackSString buf;
            buf.Printf("    Type:          ");
            switch (m_allocationType)
            {
                case kAllocMem:
                    buf.AppendASCII("AllocMem()\n");
                    break;
                case kFreedMem:
                    buf.AppendASCII("Free\n");
                    break;
                default:
                    break;
            }
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf("    Start of block:       0x%p\n", m_pMem);
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf("    End of block:         0x%p\n", ((BYTE*)m_pMem) + m_dwSize - 1);
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf("    Requested size:       %lu (0x%lx)\n", (ULONG)m_dwRequestedSize, (ULONG)m_dwRequestedSize);
            pSString->Append(buf);
        }

        {
            StackSString buf;
            buf.Printf("    Actual size:          %lu (0x%lx)\n", (ULONG)m_dwSize, (ULONG)m_dwSize);
            pSString->Append(buf);
        }

        pSString->AppendASCII("\n");
    }


    BOOL QuietValidate();

};


class LoaderHeapSniffer
{
    public:
        static DWORD InitDebugFlags()
        {
            WRAPPER_NO_CONTRACT;

            DWORD dwDebugFlags = 0;
            if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_LoaderHeapCallTracing))
            {
                dwDebugFlags |= UnlockedLoaderHeap::kCallTracing;
            }
            return dwDebugFlags;
        }


        static VOID RecordEvent(UnlockedLoaderHeap *pHeap,
                                AllocationType allocationType,
                                __in const char *szFile,
                                int            lineNum,
                                __in const char *szAllocFile,
                                int            allocLineNum,
                                void          *pMem,
                                size_t         dwRequestedSize,
                                size_t         dwSize
                                );

        static VOID ClearEvents(UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_FORBID_FAULT;

            LoaderHeapEvent *pEvent = pHeap->m_pEventList;
            while (pEvent)
            {
                LoaderHeapEvent *pNext = pEvent->m_pNext;
                delete pEvent;
                pEvent = pNext;
            }
            pHeap->m_pEventList = NULL;
        }


        static VOID CompactEvents(UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_FORBID_FAULT;

            LoaderHeapEvent **ppEvent = &(pHeap->m_pEventList);
            while (*ppEvent)
            {
                LoaderHeapEvent *pEvent = *ppEvent;
                if (pEvent->m_allocationType != kFreedMem)
                {
                    ppEvent = &(pEvent->m_pNext);
                }
                else
                {
                    LoaderHeapEvent **ppWalk = &(pEvent->m_pNext);
                    BOOL fMatchFound = FALSE;
                    while (*ppWalk && !fMatchFound)
                    {
                        LoaderHeapEvent *pWalk = *ppWalk;
                        if (pWalk->m_allocationType  != kFreedMem &&
                            pWalk->m_pMem            == pEvent->m_pMem &&
                            pWalk->m_dwRequestedSize == pEvent->m_dwRequestedSize)
                        {
                            // Delete matched pairs

                            // Order is important here - updating *ppWalk may change pEvent->m_pNext, and we want
                            // to get the updated value when we unlink pEvent.
                            *ppWalk = pWalk->m_pNext;
                            *ppEvent = pEvent->m_pNext;

                            delete pEvent;
                            delete pWalk;
                            fMatchFound = TRUE;
                        }
                        else
                        {
                            ppWalk = &(pWalk->m_pNext);
                        }
                    }

                    if (!fMatchFound)
                    {
                        ppEvent = &(pEvent->m_pNext);
                    }
                }
            }
        }
        static VOID PrintEvents(UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_FORBID_FAULT;

            printf("\n------------- LoaderHeapEvents (in reverse time order!) --------------------");

            LoaderHeapEvent *pEvent = pHeap->m_pEventList;
            while (pEvent)
            {
                printf("\n");
                switch (pEvent->m_allocationType)
                {
                    case kAllocMem:         printf("AllocMem        "); break;
                    case kFreedMem:         printf("BackoutMem      "); break;

                }
                printf(" ptr = 0x%-8p", pEvent->m_pMem);
                printf(" rqsize = 0x%-8x", (unsigned)pEvent->m_dwRequestedSize);
                printf(" actsize = 0x%-8x", (unsigned)pEvent->m_dwSize);
                printf(" (at %s@%d)", pEvent->m_szFile, pEvent->m_lineNum);
                if (pEvent->m_allocationType == kFreedMem)
                {
                    printf(" (original allocation at %s@%d)", pEvent->m_szAllocFile, pEvent->m_allocLineNum);
                }

                pEvent = pEvent->m_pNext;

            }
            printf("\n------------- End of LoaderHeapEvents --------------------------------------");
            printf("\n");

        }


        static VOID PitchSniffer(SString *pSString)
        {
            WRAPPER_NO_CONTRACT;
            pSString->AppendASCII("\n"
                             "\nBecause call-tracing wasn't turned on, we couldn't provide details about who last owned the affected memory block. To get more precise diagnostics,"
                             "\nset the following registry DWORD value:"
                             "\n"
                             "\n    HKLM\\Software\\Microsoft\\.NETFramework\\LoaderHeapCallTracing = 1"
                             "\n"
                             "\nand rerun the scenario that crashed."
                             "\n"
                             "\n");
        }

        static LoaderHeapEvent *FindEvent(UnlockedLoaderHeap *pHeap, void *pAddr)
        {
            LIMITED_METHOD_CONTRACT;

            LoaderHeapEvent *pEvent = pHeap->m_pEventList;
            while (pEvent)
            {
                if (pAddr >= pEvent->m_pMem && pAddr <= ( ((BYTE*)pEvent->m_pMem) + pEvent->m_dwSize - 1))
                {
                    return pEvent;
                }
                pEvent = pEvent->m_pNext;
            }
            return NULL;

        }


        static void ValidateFreeList(UnlockedLoaderHeap *pHeap);

        static void WeGotAFaultNowWhat(UnlockedLoaderHeap *pHeap)
        {
            WRAPPER_NO_CONTRACT;
            ValidateFreeList(pHeap);

            //If none of the above popped up an assert, pop up a generic one.
            _ASSERTE(!("Unexpected AV inside LoaderHeap. The usual reason is that someone overwrote the end of a block or wrote into a freed block.\n"));

        }

};


#endif


#ifdef _DEBUG
#define LOADER_HEAP_BEGIN_TRAP_FAULT BOOL __faulted = FALSE; EX_TRY {
#define LOADER_HEAP_END_TRAP_FAULT   } EX_CATCH {__faulted = TRUE; } EX_END_CATCH(SwallowAllExceptions) if (__faulted) LoaderHeapSniffer::WeGotAFaultNowWhat(pHeap);
#else
#define LOADER_HEAP_BEGIN_TRAP_FAULT
#define LOADER_HEAP_END_TRAP_FAULT
#endif


size_t AllocMem_TotalSize(size_t dwRequestedSize, UnlockedLoaderHeap *pHeap);

//=====================================================================================
// This freelist implementation is a first cut and probably needs to be tuned.
// It should be tuned with the following assumptions:
//
//    - Freeing LoaderHeap memory is done primarily for OOM backout. LoaderHeaps
//      weren't designed to be general purpose heaps and shouldn't be used that way.
//
//    - And hence, when memory is freed, expect it to be freed in large clumps and in a
//      LIFO order. Since the LoaderHeap normally hands out memory with sequentially
//      increasing addresses, blocks will typically be freed with sequentially decreasing
//      addresses.
//
// The first cut of the freelist is a singly-linked list of free blocks using first-fit.
// Assuming the above alloc-free pattern holds, the list will end up mostly sorted
// in increasing address order. When a block is freed, we'll attempt to coalesce it
// with the first block in the list. We could also choose to be more aggressive about
// sorting and coalescing but this should probably catch most cases in practice.
//=====================================================================================
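// A small worked example (illustrative addresses only): suppose a 0x40-byte block
// at 0x1000 is freed, then a 0x40-byte block at 0x0fc0. The second InsertFreeBlock
// makes 0x0fc0 the head of the list, and MergeBlock notices that the next block on
// the list (0x1000) begins exactly m_dwSize bytes later, so the two coalesce into a
// single 0x80-byte free block - the LIFO, decreasing-address pattern described
// above is what makes this cheap first-fit scheme work well.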

// When a block is freed, we place this structure on the first bytes of the freed block (Allocations
// are bumped in size if necessary to make sure there's room.)
struct LoaderHeapFreeBlock
{
    public:
        LoaderHeapFreeBlock   *m_pNext;    // Pointer to next block on free list
        size_t                 m_dwSize;   // Total size of this block (including this header)
//! Try not to grow the size of this structure. It places a minimum size on LoaderHeap allocations.

        static void InsertFreeBlock(LoaderHeapFreeBlock **ppHead, void *pMem, size_t dwTotalSize, UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_GC_NOTRIGGER;

            LOADER_HEAP_BEGIN_TRAP_FAULT

            // It's illegal to insert a free block that's smaller than the minimum sized allocation -
            // it may stay stranded on the freelist forever.
#ifdef _DEBUG
            if (!(dwTotalSize >= AllocMem_TotalSize(1, pHeap)))
            {
                LoaderHeapSniffer::ValidateFreeList(pHeap);
                _ASSERTE(dwTotalSize >= AllocMem_TotalSize(1, pHeap));
            }

            if (!(0 == (dwTotalSize & ALLOC_ALIGN_CONSTANT)))
            {
                LoaderHeapSniffer::ValidateFreeList(pHeap);
                _ASSERTE(0 == (dwTotalSize & ALLOC_ALIGN_CONSTANT));
            }
#endif

            INDEBUG(memset(pMem, 0xcc, dwTotalSize);)
            LoaderHeapFreeBlock *pNewBlock = (LoaderHeapFreeBlock*)pMem;
            pNewBlock->m_pNext  = *ppHead;
            pNewBlock->m_dwSize = dwTotalSize;
            *ppHead = pNewBlock;

            MergeBlock(pNewBlock, pHeap);

            LOADER_HEAP_END_TRAP_FAULT
        }


        static void *AllocFromFreeList(LoaderHeapFreeBlock **ppHead, size_t dwSize, BOOL fRemoveFromFreeList, UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;
            STATIC_CONTRACT_GC_NOTRIGGER;

            INCONTRACT(_ASSERTE_IMPL(!ARE_FAULTS_FORBIDDEN()));

            void *pResult = NULL;
            LOADER_HEAP_BEGIN_TRAP_FAULT

            LoaderHeapFreeBlock **ppWalk = ppHead;
            while (*ppWalk)
            {
                LoaderHeapFreeBlock *pCur = *ppWalk;
                size_t dwCurSize = pCur->m_dwSize;
                if (dwCurSize == dwSize)
                {
                    pResult = pCur;
                    // Exact match. Hooray!
                    if (fRemoveFromFreeList)
                    {
                        *ppWalk = pCur->m_pNext;
                    }
                    break;
                }
                else if (dwCurSize > dwSize && (dwCurSize - dwSize) >= AllocMem_TotalSize(1, pHeap))
                {
                    // Partial match. Ok...
                    pResult = pCur;
                    if (fRemoveFromFreeList)
                    {
                        *ppWalk = pCur->m_pNext;
                        InsertFreeBlock(ppWalk, ((BYTE*)pCur) + dwSize, dwCurSize - dwSize, pHeap );
                    }
                    break;
                }

                // Either the block is too small, or splitting it would leave a remainder
                // smaller than the minimum block size. On to the next one.

                ppWalk = &( pCur->m_pNext );
            }

            if (pResult && fRemoveFromFreeList)
            {
                // Callers of loaderheap assume allocated memory is zero-inited so we must preserve this invariant!
                memset(pResult, 0, dwSize);
            }
            LOADER_HEAP_END_TRAP_FAULT
            return pResult;
        }


    private:
        // Try to merge pFreeBlock with its immediate successor. Returns TRUE if a merge happened, FALSE if not.
        static BOOL MergeBlock(LoaderHeapFreeBlock *pFreeBlock, UnlockedLoaderHeap *pHeap)
        {
            STATIC_CONTRACT_NOTHROW;

            BOOL result = FALSE;

            LOADER_HEAP_BEGIN_TRAP_FAULT

            LoaderHeapFreeBlock *pNextBlock = pFreeBlock->m_pNext;
            size_t               dwSize     = pFreeBlock->m_dwSize;

            if (pNextBlock == NULL || ((BYTE*)pNextBlock) != (((BYTE*)pFreeBlock) + dwSize))
            {
                result = FALSE;
            }
            else
            {
                size_t dwCombinedSize = dwSize + pNextBlock->m_dwSize;
                LoaderHeapFreeBlock *pNextNextBlock = pNextBlock->m_pNext;
                INDEBUG(memset(pFreeBlock, 0xcc, dwCombinedSize);)
                pFreeBlock->m_pNext  = pNextNextBlock;
                pFreeBlock->m_dwSize = dwCombinedSize;

                result = TRUE;
            }

            LOADER_HEAP_END_TRAP_FAULT
            return result;
        }

};


//=====================================================================================
// These helpers encapsulate the actual layout of a block allocated by AllocMem
// and UnlockedAllocMem():
//
// ==> Starting address is always pointer-aligned.
//
//   - x  bytes of user bytes        (where "x" is the actual dwSize passed into AllocMem)
//
//   - y  bytes of "EE" (DEBUG-ONLY) (where "y" == LOADER_HEAP_DEBUG_BOUNDARY (normally 0))
//   - z  bytes of pad  (DEBUG-ONLY) (where "z" is just enough to pointer-align the following byte)
//   - a  bytes of tag  (DEBUG-ONLY) (where "a" is sizeof(LoaderHeapValidationTag))
//
//   - b  bytes of pad               (if total size after all this < sizeof(LoaderHeapFreeBlock), pad enough to make it the size of LoaderHeapFreeBlock)
//   - c  bytes of pad               (where "c" is just enough to pointer-align the following byte)
//
// ==> Following address is always pointer-aligned
//=====================================================================================
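//
// A worked example (illustrative; assumes a 64-bit retail build where the debug
// boundary and tag drop out, ALLOC_ALIGN_CONSTANT == 7, and
// sizeof(LoaderHeapFreeBlock) == 16): a request for dwRequestedSize = 5 is first
// padded up to sizeof(LoaderHeapFreeBlock) = 16 so the block can later live on
// the free list, then pointer-aligned: (16 + 7) & ~7 == 16 bytes actually consumed.
//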

// Convert the requested size into the total # of bytes we'll actually allocate (including padding)
inline size_t AllocMem_TotalSize(size_t dwRequestedSize, UnlockedLoaderHeap *pHeap)
{
    LIMITED_METHOD_CONTRACT;

    size_t dwSize = dwRequestedSize;
#ifdef _DEBUG
    dwSize += LOADER_HEAP_DEBUG_BOUNDARY;
    dwSize = ((dwSize + ALLOC_ALIGN_CONSTANT) & (~ALLOC_ALIGN_CONSTANT));

    if (!pHeap->m_fExplicitControl)
    {
        dwSize += sizeof(LoaderHeapValidationTag);
    }
#endif
    if (!pHeap->m_fExplicitControl)
    {
        if (dwSize < sizeof(LoaderHeapFreeBlock))
        {
            dwSize = sizeof(LoaderHeapFreeBlock);
        }
    }
    dwSize = ((dwSize + ALLOC_ALIGN_CONSTANT) & (~ALLOC_ALIGN_CONSTANT));

    return dwSize;
}


#ifdef _DEBUG
LoaderHeapValidationTag *AllocMem_GetTag(LPVOID pBlock, size_t dwRequestedSize)
{
    LIMITED_METHOD_CONTRACT;

    size_t dwSize = dwRequestedSize;
    dwSize += LOADER_HEAP_DEBUG_BOUNDARY;
    dwSize = ((dwSize + ALLOC_ALIGN_CONSTANT) & (~ALLOC_ALIGN_CONSTANT));
    return (LoaderHeapValidationTag *)( ((BYTE*)pBlock) + dwSize );
}
#endif


//=====================================================================================
// UnlockedLoaderHeap methods
//=====================================================================================

#ifndef DACCESS_COMPILE

UnlockedLoaderHeap::UnlockedLoaderHeap(DWORD dwReserveBlockSize,
                                       DWORD dwCommitBlockSize,
                                       const BYTE* dwReservedRegionAddress,
                                       SIZE_T dwReservedRegionSize,
                                       size_t *pPrivatePerfCounter_LoaderBytes,
                                       RangeList *pRangeList,
                                       BOOL fMakeExecutable)
{
    CONTRACTL
    {
        CONSTRUCTOR_CHECK;
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END;

    m_pCurBlock                  = NULL;
    m_pFirstBlock                = NULL;

    m_dwReserveBlockSize         = dwReserveBlockSize;
    m_dwCommitBlockSize          = dwCommitBlockSize;

    m_pPtrToEndOfCommittedRegion = NULL;
    m_pEndReservedRegion         = NULL;
    m_pAllocPtr                  = NULL;

    m_pRangeList                 = pRangeList;

    m_dwTotalAlloc               = 0;

#ifdef _DEBUG
    m_dwDebugWastedBytes         = 0;
    s_dwNumInstancesOfLoaderHeaps++;
    m_pEventList                 = NULL;
    m_dwDebugFlags               = LoaderHeapSniffer::InitDebugFlags();
    m_fPermitStubsWithUnwindInfo = FALSE;
    m_fStubUnwindInfoUnregistered= FALSE;
#endif

    m_pPrivatePerfCounter_LoaderBytes = pPrivatePerfCounter_LoaderBytes;

    m_Options                    = 0;

#ifndef CROSSGEN_COMPILE
    if (fMakeExecutable)
        m_Options                |= LHF_EXECUTABLE;
#endif // CROSSGEN_COMPILE

    m_pFirstFreeBlock            = NULL;

    if (dwReservedRegionAddress != NULL && dwReservedRegionSize > 0)
    {
        m_reservedBlock.Init((void *)dwReservedRegionAddress, dwReservedRegionSize, FALSE);
    }
}

// ~UnlockedLoaderHeap is not synchronized (obviously)
UnlockedLoaderHeap::~UnlockedLoaderHeap()
{
    CONTRACTL
    {
        DESTRUCTOR_CHECK;
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END

    _ASSERTE(!m_fPermitStubsWithUnwindInfo || m_fStubUnwindInfoUnregistered);

    if (m_pRangeList != NULL)
        m_pRangeList->RemoveRanges((void *) this);

    LoaderHeapBlock *pSearch, *pNext;

    for (pSearch = m_pFirstBlock; pSearch; pSearch = pNext)
    {
        void *  pVirtualAddress;
        BOOL    fReleaseMemory;

        pVirtualAddress = pSearch->pVirtualAddress;
        fReleaseMemory = pSearch->m_fReleaseMemory;
        pNext = pSearch->pNext;

        if (fReleaseMemory)
        {
            BOOL fSuccess;
            fSuccess = ClrVirtualFree(pVirtualAddress, 0, MEM_RELEASE);
            _ASSERTE(fSuccess);
        }
    }

    if (m_reservedBlock.m_fReleaseMemory)
    {
        BOOL fSuccess;
        fSuccess = ClrVirtualFree(m_reservedBlock.pVirtualAddress, 0, MEM_RELEASE);
        _ASSERTE(fSuccess);
    }

    if (m_pPrivatePerfCounter_LoaderBytes)
        *m_pPrivatePerfCounter_LoaderBytes = *m_pPrivatePerfCounter_LoaderBytes - (DWORD) m_dwTotalAlloc;

    INDEBUG(s_dwNumInstancesOfLoaderHeaps --;)
}

void UnlockedLoaderHeap::UnlockedSetReservedRegion(BYTE* dwReservedRegionAddress, SIZE_T dwReservedRegionSize, BOOL fReleaseMemory)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(m_reservedBlock.pVirtualAddress == NULL);
    m_reservedBlock.Init((void *)dwReservedRegionAddress, dwReservedRegionSize, fReleaseMemory);
}

#endif // #ifndef DACCESS_COMPILE

#if 0
// Disables access to all pages in the heap - useful when trying to determine if someone is
// accessing something in the low frequency heap
void UnlockedLoaderHeap::DebugGuardHeap()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapBlock *pSearch, *pNext;

    for (pSearch = m_pFirstBlock; pSearch; pSearch = pNext)
    {
        void *  pResult;
        void *  pVirtualAddress;

        pVirtualAddress = pSearch->pVirtualAddress;
        pNext = pSearch->pNext;

        pResult = ClrVirtualAlloc(pVirtualAddress, pSearch->dwVirtualSize, MEM_COMMIT, PAGE_NOACCESS);
        _ASSERTE(pResult != NULL);
    }
}
#endif

size_t UnlockedLoaderHeap::GetBytesAvailCommittedRegion()
{
    LIMITED_METHOD_CONTRACT;

    if (m_pAllocPtr < m_pPtrToEndOfCommittedRegion)
        return (size_t)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr);
    else
        return 0;
}

size_t UnlockedLoaderHeap::GetBytesAvailReservedRegion()
{
    LIMITED_METHOD_CONTRACT;

    if (m_pAllocPtr < m_pEndReservedRegion)
        return (size_t)(m_pEndReservedRegion - m_pAllocPtr);
    else
        return 0;
}

#define SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve)                     \
        m_pPtrToEndOfCommittedRegion = (BYTE *) (pData) + (dwSizeToCommit);         \
        m_pAllocPtr                  = (BYTE *) (pData) + sizeof(LoaderHeapBlock);  \
        m_pEndReservedRegion         = (BYTE *) (pData) + (dwSizeToReserve);


#ifndef DACCESS_COMPILE

BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        INJECT_FAULT(return FALSE;);
    }
    CONTRACTL_END;

    size_t dwSizeToReserve;

    // Add sizeof(LoaderHeapBlock)
    dwSizeToCommit += sizeof(LoaderHeapBlock);

    // Round to page size again
    dwSizeToCommit = ALIGN_UP(dwSizeToCommit, GetOsPageSize());

    void *pData = NULL;
    BOOL fReleaseMemory = TRUE;

    // We were provided with a reserved memory block at instance creation time, so use it if it's big enough.
    if (m_reservedBlock.pVirtualAddress != NULL &&
        m_reservedBlock.dwVirtualSize >= dwSizeToCommit)
    {
        // Get the info out of the block.
        pData = m_reservedBlock.pVirtualAddress;
        dwSizeToReserve = m_reservedBlock.dwVirtualSize;
        fReleaseMemory = m_reservedBlock.m_fReleaseMemory;

        // Zero the block so this memory doesn't get used again.
        m_reservedBlock.Init(NULL, 0, FALSE);
    }
    // The caller is asking us to allocate the memory
    else
    {
        if (m_fExplicitControl)
        {
            return FALSE;
        }

        // Figure out how much to reserve
        dwSizeToReserve = max(dwSizeToCommit, m_dwReserveBlockSize);

        // Round to VIRTUAL_ALLOC_RESERVE_GRANULARITY
        dwSizeToReserve = ALIGN_UP(dwSizeToReserve, VIRTUAL_ALLOC_RESERVE_GRANULARITY);

        _ASSERTE(dwSizeToCommit <= dwSizeToReserve);

        //
        // Reserve pages
        //

        pData = ClrVirtualAllocExecutable(dwSizeToReserve, MEM_RESERVE, PAGE_NOACCESS);
        if (pData == NULL)
        {
            return FALSE;
        }
    }

    // When the user passes in reserved memory, the commit size is 0 and is adjusted to be
    // sizeof(LoaderHeapBlock). If for some reason that is not true, we catch it with this
    // assertion, and whoever changed the code will have to add logic here to handle the case
    // where the committed size exceeds the reserved size. One option could be to leak the
    // user's memory and reserve+commit a new block; another would be to fail the allocation
    // and notify the user to provide more reserved memory.
    _ASSERTE((dwSizeToCommit <= dwSizeToReserve) && "Loaderheap tried to commit more memory than reserved by user");

    if (pData == NULL)
    {
        //_ASSERTE(!"Unable to ClrVirtualAlloc reserve in a loaderheap");
        return FALSE;
    }

    // Commit first set of pages, since it will contain the LoaderHeapBlock
    void *pTemp = ClrVirtualAlloc(pData, dwSizeToCommit, MEM_COMMIT, (m_Options & LHF_EXECUTABLE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
    if (pTemp == NULL)
    {
        //_ASSERTE(!"Unable to ClrVirtualAlloc commit in a loaderheap");

        // Unable to commit - release pages
        if (fReleaseMemory)
            ClrVirtualFree(pData, 0, MEM_RELEASE);

        return FALSE;
    }

    if (m_pPrivatePerfCounter_LoaderBytes)
        *m_pPrivatePerfCounter_LoaderBytes = *m_pPrivatePerfCounter_LoaderBytes + (DWORD) dwSizeToCommit;

    // Record reserved range in range list, if one is specified
    // Do this AFTER the commit - otherwise we'll have bogus ranges included.
    if (m_pRangeList != NULL)
    {
        if (!m_pRangeList->AddRange((const BYTE *) pData,
                                    ((const BYTE *) pData) + dwSizeToReserve,
                                    (void *) this))
        {

            if (fReleaseMemory)
                ClrVirtualFree(pData, 0, MEM_RELEASE);

            return FALSE;
        }
    }

    m_dwTotalAlloc += dwSizeToCommit;

    LoaderHeapBlock *pNewBlock;

    pNewBlock = (LoaderHeapBlock *) pData;

    pNewBlock->dwVirtualSize    = dwSizeToReserve;
    pNewBlock->pVirtualAddress  = pData;
    pNewBlock->pNext            = NULL;
    pNewBlock->m_fReleaseMemory = fReleaseMemory;

    LoaderHeapBlock *pCurBlock = m_pCurBlock;

    // Add to linked list
    while (pCurBlock != NULL &&
           pCurBlock->pNext != NULL)
        pCurBlock = pCurBlock->pNext;

    if (pCurBlock != NULL)
        pCurBlock->pNext = pNewBlock;
    else
        m_pFirstBlock = pNewBlock;

    // If we want to use the memory immediately...
    m_pCurBlock = pNewBlock;

    SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve);

    return TRUE;
}

// Get some more committed pages - either commit some more in the current reserved region, or, if it
// has run out, reserve another set of pages.
// Returns: FALSE if we can't get any more memory
// TRUE: We can/did get some more memory - check to see if it's sufficient for
//       the caller's needs (see UnlockedAllocMem for example of use)
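//
// A sketch of the typical caller pattern (this mirrors UnlockedAllocMem_NoThrow
// below; illustrative only):
//
//     dwSize = AllocMem_TotalSize(dwSize, this);
// again:
//     ... try the free list, then the committed region ...
//     if (GetMoreCommittedPages(dwSize))
//         goto again;          // the region grew; retry the carve-out
//     return NULL;             // out of memory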
BOOL UnlockedLoaderHeap::GetMoreCommittedPages(size_t dwMinSize)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        INJECT_FAULT(return FALSE;);
    }
    CONTRACTL_END;

    // If we have memory we can use, what are you doing here!
    _ASSERTE(dwMinSize > (SIZE_T)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr));

    // Does this fit in the reserved region?
    if (dwMinSize <= (size_t)(m_pEndReservedRegion - m_pAllocPtr))
    {
        SIZE_T dwSizeToCommit = (m_pAllocPtr + dwMinSize) - m_pPtrToEndOfCommittedRegion;

        if (dwSizeToCommit < m_dwCommitBlockSize)
            dwSizeToCommit = min((SIZE_T)(m_pEndReservedRegion - m_pPtrToEndOfCommittedRegion), (SIZE_T)m_dwCommitBlockSize);

        // Round to page size
        dwSizeToCommit = ALIGN_UP(dwSizeToCommit, GetOsPageSize());

        // Yes, so commit the desired number of reserved pages
        void *pData = ClrVirtualAlloc(m_pPtrToEndOfCommittedRegion, dwSizeToCommit, MEM_COMMIT, (m_Options & LHF_EXECUTABLE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
        if (pData == NULL)
            return FALSE;

        if (m_pPrivatePerfCounter_LoaderBytes)
            *m_pPrivatePerfCounter_LoaderBytes = *m_pPrivatePerfCounter_LoaderBytes + (DWORD) dwSizeToCommit;

        m_dwTotalAlloc += dwSizeToCommit;

        m_pPtrToEndOfCommittedRegion += dwSizeToCommit;
        return TRUE;
    }

    // Need to allocate a new set of reserved pages
    INDEBUG(m_dwDebugWastedBytes += (size_t)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr);)

    // Note: there are unused reserved pages at the end of the current region - can't do much about that.
    // Provide dwMinSize here since UnlockedReservePages will round up the commit size again
    // after adding in the size of the LoaderHeapBlock header.
    return UnlockedReservePages(dwMinSize);
}

void *UnlockedLoaderHeap::UnlockedAllocMem(size_t dwSize
                                           COMMA_INDEBUG(__in const char *szFile)
                                           COMMA_INDEBUG(int  lineNum))
{
    CONTRACT(void*)
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        INJECT_FAULT(ThrowOutOfMemory(););
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;

    void *pResult = UnlockedAllocMem_NoThrow(
        dwSize COMMA_INDEBUG(szFile) COMMA_INDEBUG(lineNum));

    if (pResult == NULL)
        ThrowOutOfMemory();

    RETURN pResult;
}

#ifdef _DEBUG
static DWORD ShouldInjectFault()
{
    static DWORD fInjectFault = 99;

    if (fInjectFault == 99)
        fInjectFault = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_InjectFault) != 0);
    return fInjectFault;
}

#define SHOULD_INJECT_FAULT(return_statement)   \
    do {                                        \
        if (ShouldInjectFault() & 0x1)          \
        {                                       \
            char *a = new (nothrow) char;       \
            if (a == NULL)                      \
            {                                   \
                return_statement;               \
            }                                   \
            delete a;                           \
        }                                       \
    } while (FALSE)

#else

#define SHOULD_INJECT_FAULT(return_statement) do { (void)((void *)0); } while (FALSE)

#endif

void *UnlockedLoaderHeap::UnlockedAllocMem_NoThrow(size_t dwSize
                                                   COMMA_INDEBUG(__in const char *szFile)
                                                   COMMA_INDEBUG(int lineNum))
{
    CONTRACT(void*)
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        INJECT_FAULT(CONTRACT_RETURN NULL;);
        PRECONDITION(dwSize != 0);
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
    }
    CONTRACT_END;

    SHOULD_INJECT_FAULT(RETURN NULL);

    INDEBUG(size_t dwRequestedSize = dwSize;)

    INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));

#ifdef RANDOMIZE_ALLOC
    if (!m_fExplicitControl)
        dwSize += s_random.Next() % 256;
#endif

    dwSize = AllocMem_TotalSize(dwSize, this);

again:

    {
        // Any memory available on the free list?
        void *pData = LoaderHeapFreeBlock::AllocFromFreeList(&m_pFirstFreeBlock, dwSize, TRUE /*fRemoveFromFreeList*/, this);
        if (!pData)
        {
            // Enough bytes available in committed region?
            if (dwSize <= GetBytesAvailCommittedRegion())
            {
                pData = m_pAllocPtr;
                m_pAllocPtr += dwSize;
            }
        }

        if (pData)
        {
#ifdef _DEBUG

            BYTE *pAllocatedBytes = (BYTE *)pData;
#if LOADER_HEAP_DEBUG_BOUNDARY > 0
            // Don't fill the memory we allocated - it is assumed to be zeroed - fill the memory after it
            memset(pAllocatedBytes + dwRequestedSize, 0xEE, LOADER_HEAP_DEBUG_BOUNDARY);
#endif
            if (dwRequestedSize > 0)
            {
                _ASSERTE_MSG(pAllocatedBytes[0] == 0 && memcmp(pAllocatedBytes, pAllocatedBytes + 1, dwRequestedSize - 1) == 0,
                    "LoaderHeap must return zero-initialized memory");
            }

            if (!m_fExplicitControl)
            {
                LoaderHeapValidationTag *pTag = AllocMem_GetTag(pData, dwRequestedSize);
                pTag->m_allocationType  = kAllocMem;
                pTag->m_dwRequestedSize = dwRequestedSize;
                pTag->m_szFile          = szFile;
                pTag->m_lineNum         = lineNum;
            }

            if (m_dwDebugFlags & kCallTracing)
            {
                LoaderHeapSniffer::RecordEvent(this,
                                               kAllocMem,
                                               szFile,
                                               lineNum,
                                               szFile,
                                               lineNum,
                                               pData,
                                               dwRequestedSize,
                                               dwSize
                                               );
            }

#endif

            EtwAllocRequest(this, pData, dwSize);
            RETURN pData;
        }
    }

    // Need to commit some more pages in reserved region.
    // If we run out of pages in the reserved region, ClrVirtualAlloc some more pages
    if (GetMoreCommittedPages(dwSize))
        goto again;

    // We could not satisfy this allocation request
    RETURN NULL;
}

void UnlockedLoaderHeap::UnlockedBackoutMem(void *pMem,
                                            size_t dwRequestedSize
                                            COMMA_INDEBUG(__in const char *szFile)
                                            COMMA_INDEBUG(int  lineNum)
                                            COMMA_INDEBUG(__in const char *szAllocFile)
                                            COMMA_INDEBUG(int  allocLineNum))
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END;

    // Because the primary use of this function is backout, we'll be nice and
    // define Backout(NULL) to be a legal NOP.
    if (pMem == NULL)
    {
        return;
    }

#ifdef _DEBUG
    {
        DEBUG_ONLY_REGION();

        LoaderHeapValidationTag *pTag = AllocMem_GetTag(pMem, dwRequestedSize);

        if (pTag->m_dwRequestedSize != dwRequestedSize || pTag->m_allocationType != kAllocMem)
        {
            CONTRACT_VIOLATION(ThrowsViolation|FaultViolation); // We're reporting a heap corruption - who cares about violations

            StackSString message;
            message.Printf("HEAP VIOLATION: Invalid BackoutMem() call made at:\n"
                           "\n"
                           "     File: %s\n"
                           "     Line: %d\n"
                           "\n"
                           "Attempting to free block originally allocated at:\n"
                           "\n"
                           "     File: %s\n"
                           "     Line: %d\n"
                           "\n"
                           "The arguments to BackoutMem() were:\n"
                           "\n"
                           "     Pointer: 0x%p\n"
                           "     Size:    %lu (0x%lx)\n"
                           "\n"
                           ,szFile
                           ,lineNum
                           ,szAllocFile
                           ,allocLineNum
                           ,pMem
                           ,(ULONG)dwRequestedSize
                           ,(ULONG)dwRequestedSize
                          );


            if (m_dwDebugFlags & kCallTracing)
            {
                message.AppendASCII("*** CALLTRACING ENABLED ***\n");
                LoaderHeapEvent *pEvent = LoaderHeapSniffer::FindEvent(this, pMem);
                if (!pEvent)
                {
                    message.AppendASCII("This pointer doesn't appear to have come from this LoaderHeap.\n");
                }
                else
                {
                    message.AppendASCII(pMem == pEvent->m_pMem ? "We have the following data about this pointer:" : "This pointer points to the middle of the following block:");
                    pEvent->Describe(&message);
                }
            }

            if (pTag->m_dwRequestedSize != dwRequestedSize)
            {
                StackSString buf;
                buf.Printf(
                        "Possible causes:\n"
                        "\n"
                        "   - This pointer wasn't allocated from this loaderheap.\n"
                        "   - This pointer was allocated by AllocAlignedMem and you didn't adjust for the \"extra.\"\n"
                        "   - This pointer has already been freed.\n"
                        "   - You passed in the wrong size. You must pass the exact same size you passed to AllocMem().\n"
                        "   - Someone wrote past the end of this block making it appear as if one of the above were true.\n"
                        );
                message.Append(buf);

            }
            else
            {
                message.AppendASCII("This memory block is completely unrecognizable.\n");
            }


            if (!(m_dwDebugFlags & kCallTracing))
            {
                LoaderHeapSniffer::PitchSniffer(&message);
            }

            StackScratchBuffer scratch;
            DbgAssertDialog(szFile, lineNum, (char*) message.GetANSI(scratch));

        }
    }
#endif

    size_t dwSize = AllocMem_TotalSize(dwRequestedSize, this);

#ifdef _DEBUG
    if (m_dwDebugFlags & kCallTracing)
    {
        DEBUG_ONLY_REGION();

        LoaderHeapValidationTag *pTag = m_fExplicitControl ? NULL : AllocMem_GetTag(pMem, dwRequestedSize);


        LoaderHeapSniffer::RecordEvent(this,
                                       kFreedMem,
                                       szFile,
                                       lineNum,
                                       (pTag && (allocLineNum < 0)) ? pTag->m_szFile  : szAllocFile,
                                       (pTag && (allocLineNum < 0)) ? pTag->m_lineNum : allocLineNum,
                                       pMem,
                                       dwRequestedSize,
                                       dwSize
                                       );
    }
#endif

    if (m_pAllocPtr == ( ((BYTE*)pMem) + dwSize ))
    {
        // Cool. This was the last block allocated. We can just undo the allocation instead
        // of going to the freelist.
        memset(pMem, 0x00, dwSize); // Fill freed region with 0
        m_pAllocPtr = (BYTE*)pMem;
    }
    else
    {
        LoaderHeapFreeBlock::InsertFreeBlock(&m_pFirstFreeBlock, pMem, dwSize, this);
    }

}

// Allocates memory aligned on power-of-2 boundary.
//
// The return value is a pointer that's guaranteed to be aligned.
//
// FREEING THIS BLOCK: Underneath, the actual block allocated may
// be larger and start at an address prior to the one you got back.
// It is this adjusted size and pointer that you pass to BackoutMem.
// The required adjustment is passed back through the pdwExtra pointer.
//
// Here is how to properly back out the memory:
//
//   size_t dwExtra;
//   void *pMem = UnlockedAllocAlignedMem(dwRequestedSize, alignment, &dwExtra);
//   _ASSERTE( 0 == (((UINT_PTR)pMem) & (alignment - 1)) );
//   UnlockedBackoutMem( ((BYTE*)pMem) - dwExtra, dwRequestedSize + dwExtra );
//
// If you use the AllocMemHolder or AllocMemTracker, all this is taken care of
// behind the scenes.
//
//
void *UnlockedLoaderHeap::UnlockedAllocAlignedMem_NoThrow(size_t  dwRequestedSize,
                                                          size_t  alignment,
                                                          size_t *pdwExtra
                                                          COMMA_INDEBUG(__in const char *szFile)
                                                          COMMA_INDEBUG(int  lineNum))
{
    CONTRACT(void*)
    {
        NOTHROW;

        // Macro syntax can't handle this INJECT_FAULT expression - we'll use a precondition instead
        //INJECT_FAULT( do{ if (*pdwExtra) {*pdwExtra = 0} RETURN NULL; } while(0) );

        PRECONDITION( alignment != 0 );
        PRECONDITION(0 == (alignment & (alignment - 1))); // require power of 2
        POSTCONDITION( (RETVAL) ?
                       (0 == ( ((UINT_PTR)(RETVAL)) & (alignment - 1))) : // If non-null, pointer must be aligned
                       (pdwExtra == NULL || 0 == *pdwExtra)    //   or else *pdwExtra must be set to 0
                     );
    }
    CONTRACT_END

    STATIC_CONTRACT_FAULT;

1589     // Set default value
1590             if (pdwExtra)
1591             {
1592                 *pdwExtra = 0;
1593             }
1594
1595     SHOULD_INJECT_FAULT(RETURN NULL);
1596
1597     void *pResult;
1598
1599     INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));
1600
1601     // Check for overflow if we align the allocation
1602     if (dwRequestedSize + alignment < dwRequestedSize)
1603     {
1604         RETURN NULL;
1605     }
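
    // Worked example for the check above (illustrative): on a 32-bit build,
    // dwRequestedSize == 0xFFFFFFF0 with alignment == 0x20 sums to 0x10 after
    // wrap-around, which is smaller than the original request, so the
    // allocation is refused rather than undersized.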

    // We don't know how much "extra" we need to satisfy the alignment until
    // we know which address will be handed out, and we can't know that until
    // we know whether the allocation will fit within the current reserved range.
    //
    // Thus, we'll request as much heap growth as is needed for the worst case (extra == alignment)
    size_t dwRoomSize = AllocMem_TotalSize(dwRequestedSize + alignment, this);
    if (dwRoomSize > GetBytesAvailCommittedRegion())
    {
        if (!GetMoreCommittedPages(dwRoomSize))
        {
            RETURN NULL;
        }
    }

    pResult = m_pAllocPtr;

    size_t extra = alignment - ((size_t)pResult & ((size_t)alignment - 1));

// On DEBUG, we force a non-zero extra so people don't forget to adjust for it on backout
#ifndef _DEBUG
    if (extra == alignment)
    {
        extra = 0;
    }
#endif
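
    // Worked example (illustrative): with m_pAllocPtr == 0x10013 and
    // alignment == 8, extra == 8 - (0x10013 & 7) == 5, so the caller receives
    // 0x10018. If the pointer is already aligned, extra computes to the full
    // alignment; retail builds fold it back to 0, while debug builds keep it
    // non-zero so callers that forget the *pdwExtra adjustment fail fast.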

    S_SIZE_T cbAllocSize = S_SIZE_T( dwRequestedSize ) + S_SIZE_T( extra );
    if( cbAllocSize.IsOverflow() )
    {
        RETURN NULL;
    }

    size_t dwSize = AllocMem_TotalSize( cbAllocSize.Value(), this);
    m_pAllocPtr += dwSize;

    ((BYTE*&)pResult) += extra;

#ifdef _DEBUG
    BYTE *pAllocatedBytes = (BYTE *)pResult;
#if LOADER_HEAP_DEBUG_BOUNDARY > 0
    // Don't fill the entire memory - we assume it is all zeroed - just the memory after our alloc
    memset(pAllocatedBytes + dwRequestedSize, 0xee, LOADER_HEAP_DEBUG_BOUNDARY);
#endif

    if (dwRequestedSize != 0)
    {
        _ASSERTE_MSG(pAllocatedBytes[0] == 0 && memcmp(pAllocatedBytes, pAllocatedBytes + 1, dwRequestedSize - 1) == 0,
            "LoaderHeap must return zero-initialized memory");
    }
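
    // (A note on the assert above: comparing the buffer against itself shifted
    // by one byte verifies that every byte equals its successor, i.e. the whole
    // buffer holds one repeated value, which pAllocatedBytes[0] == 0 pins to
    // zero.)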

    if (m_dwDebugFlags & kCallTracing)
    {
        LoaderHeapSniffer::RecordEvent(this,
                                       kAllocMem,
                                       szFile,
                                       lineNum,
                                       szFile,
                                       lineNum,
                                       ((BYTE*)pResult) - extra,
                                       dwRequestedSize + extra,
                                       dwSize
                                       );
    }

    EtwAllocRequest(this, pResult, dwSize);

    if (!m_fExplicitControl)
    {
        LoaderHeapValidationTag *pTag = AllocMem_GetTag(((BYTE*)pResult) - extra, dwRequestedSize + extra);
        pTag->m_allocationType  = kAllocMem;
        pTag->m_dwRequestedSize = dwRequestedSize + extra;
        pTag->m_szFile          = szFile;
        pTag->m_lineNum         = lineNum;
    }
#endif //_DEBUG

    if (pdwExtra)
    {
        *pdwExtra = extra;
    }

    RETURN pResult;
}


void *UnlockedLoaderHeap::UnlockedAllocAlignedMem(size_t  dwRequestedSize,
                                                  size_t  dwAlignment,
                                                  size_t *pdwExtra
                                                  COMMA_INDEBUG(__in const char *szFile)
                                                  COMMA_INDEBUG(int  lineNum))
{
    CONTRACTL
    {
        THROWS;
        INJECT_FAULT(ThrowOutOfMemory());
    }
    CONTRACTL_END

    void *pResult = UnlockedAllocAlignedMem_NoThrow(dwRequestedSize,
                                                    dwAlignment,
                                                    pdwExtra
                                                    COMMA_INDEBUG(szFile)
                                                    COMMA_INDEBUG(lineNum));

    if (!pResult)
    {
        ThrowOutOfMemory();
    }

    return pResult;
}


void *UnlockedLoaderHeap::UnlockedAllocMemForCode_NoThrow(size_t dwHeaderSize, size_t dwCodeSize, DWORD dwCodeAlignment, size_t dwReserveForJumpStubs)
{
    CONTRACT(void*)
    {
        INSTANCE_CHECK;
        NOTHROW;
        INJECT_FAULT(CONTRACT_RETURN NULL;);
        PRECONDITION(0 == (dwCodeAlignment & (dwCodeAlignment - 1))); // require power of 2
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
    }
    CONTRACT_END;

    _ASSERTE(m_fExplicitControl);

    INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));

    // We don't know how much "extra" we need to satisfy the alignment until
    // we know which address will be handed out, and we can't know that until
    // we know whether the allocation will fit within the current reserved range.
    //
    // Thus, we'll request as much heap growth as is needed for the worst case (we request an extra dwCodeAlignment - 1 bytes)
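    //
    // Sketch of the resulting layout (illustrative, not to scale):
    //
    //   old m_pAllocPtr         pResult                 new m_pAllocPtr
    //        |<-- pad+header -->|<----- dwCodeSize ---->|<-- dwReserveForJumpStubs -->|
    //
    // The header occupies the dwHeaderSize bytes immediately below pResult,
    // pResult itself is aligned to dwCodeAlignment, and the jump-stub reserve
    // is committed by this request but only handed out by later allocations.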

    S_SIZE_T cbAllocSize = S_SIZE_T(dwHeaderSize) + S_SIZE_T(dwCodeSize) + S_SIZE_T(dwCodeAlignment - 1) + S_SIZE_T(dwReserveForJumpStubs);
    if( cbAllocSize.IsOverflow() )
    {
        RETURN NULL;
    }

    if (cbAllocSize.Value() > GetBytesAvailCommittedRegion())
    {
        if (GetMoreCommittedPages(cbAllocSize.Value()) == FALSE)
        {
            RETURN NULL;
        }
    }

    BYTE *pResult = (BYTE *)ALIGN_UP(m_pAllocPtr + dwHeaderSize, dwCodeAlignment);
    EtwAllocRequest(this, pResult, (pResult + dwCodeSize) - m_pAllocPtr);
    m_pAllocPtr = pResult + dwCodeSize;

    RETURN pResult;
}


#endif // #ifndef DACCESS_COMPILE

BOOL UnlockedLoaderHeap::IsExecutable()
{
    return (m_Options & LHF_EXECUTABLE);
}

#ifdef DACCESS_COMPILE

void UnlockedLoaderHeap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    WRAPPER_NO_CONTRACT;

    DAC_ENUM_DTHIS();

    PTR_LoaderHeapBlock block = m_pFirstBlock;
    while (block.IsValid())
    {
        // All we know is the virtual size of this block.  We don't have any way to tell how
        // much of this space was actually committed, so don't expect that this will always
        // succeed.
        // @dbgtodo : Ideally we'd reduce the risk of corruption causing problems here.
        //   We could extend LoaderHeapBlock to track a commit size,
        //   but it seems wasteful (eg. makes each AppDomain object 32 bytes larger on x64).
        TADDR addr = dac_cast<TADDR>(block->pVirtualAddress);
        TSIZE_T size = block->dwVirtualSize;
        DacEnumMemoryRegion(addr, size, false);

        block = block->pNext;
    }
}

#endif // #ifdef DACCESS_COMPILE


void UnlockedLoaderHeap::EnumPageRegions (EnumPageRegionsCallback *pCallback, PTR_VOID pvArgs)
{
    WRAPPER_NO_CONTRACT;

    PTR_LoaderHeapBlock block = m_pFirstBlock;
    while (block)
    {
        if ((*pCallback)(pvArgs, block->pVirtualAddress, block->dwVirtualSize))
        {
            break;
        }

        block = block->pNext;
    }
}
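
// A minimal callback sketch (illustrative; the parameter layout is assumed
// from the call above: the caller's cookie, then the region base, then the
// region size). Returning non-zero stops the enumeration early.
//
//   static BOOL SumRegionSizes(PTR_VOID pvArgs, PTR_VOID pvBase, SIZE_T cbSize)
//   {
//       *(SIZE_T *)pvArgs += cbSize;   // accumulate the total virtual size
//       return FALSE;                  // FALSE == keep enumerating
//   }
//
//   SIZE_T cbTotal = 0;
//   pHeap->EnumPageRegions(SumRegionSizes, &cbTotal);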


#ifdef _DEBUG

void UnlockedLoaderHeap::DumpFreeList()
{
    LIMITED_METHOD_CONTRACT;
    if (m_pFirstFreeBlock == NULL)
    {
        printf("FREEDUMP: FreeList is empty\n");
    }
    else
    {
        LoaderHeapFreeBlock *pBlock = m_pFirstFreeBlock;
        while (pBlock != NULL)
        {
            size_t dwsize = pBlock->m_dwSize;
            BOOL ccbad = FALSE;
            BOOL sizeunaligned = FALSE;
            BOOL sizesmall = FALSE;

            if ( 0 != (dwsize & ALLOC_ALIGN_CONSTANT) )
            {
                sizeunaligned = TRUE;
            }
            if ( dwsize < sizeof(LoaderHeapFreeBlock))
            {
                sizesmall = TRUE;
            }

            for (size_t i = sizeof(LoaderHeapFreeBlock); i < dwsize; i++)
            {
                if ( ((BYTE*)pBlock)[i] != 0xcc )
                {
                    ccbad = TRUE;
                    break;
                }
            }

            printf("Addr = %p, Size = %lxh", pBlock, ((ULONG)dwsize));
            if (ccbad) printf(" *** ERROR: NOT CC'd ***");
            if (sizeunaligned) printf(" *** ERROR: size not a multiple of ALLOC_ALIGN_CONSTANT ***");
            if (sizesmall) printf(" *** ERROR: size smaller than sizeof(LoaderHeapFreeBlock) ***");
            printf("\n");

            pBlock = pBlock->m_pNext;
        }
    }
}
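
// Sample output, sketched (illustrative addresses and sizes):
//
//   FREEDUMP: FreeList is empty
//
// or, one line per free block, with any failed checks appended:
//
//   Addr = 000001A2B3C04000, Size = 40h *** ERROR: NOT CC'd ***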


void UnlockedLoaderHeap::UnlockedClearEvents()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapSniffer::ClearEvents(this);
}

void UnlockedLoaderHeap::UnlockedCompactEvents()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapSniffer::CompactEvents(this);
}

void UnlockedLoaderHeap::UnlockedPrintEvents()
{
    WRAPPER_NO_CONTRACT;
    LoaderHeapSniffer::PrintEvents(this);
}


#endif //_DEBUG

//************************************************************************************
// LOADERHEAP SNIFFER METHODS
//************************************************************************************
#ifdef _DEBUG

/*static*/ VOID LoaderHeapSniffer::RecordEvent(UnlockedLoaderHeap *pHeap,
                                               AllocationType allocationType,
                                               __in const char *szFile,
                                               int            lineNum,
                                               __in const char *szAllocFile,
                                               int            allocLineNum,
                                               void          *pMem,
                                               size_t         dwRequestedSize,
                                               size_t         dwSize
                                              )
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;  // If we OOM in here, we just throw the event away.
    }
    CONTRACTL_END

    LoaderHeapEvent *pNewEvent;
    {
        {
            FAULT_NOT_FATAL();
            pNewEvent = new (nothrow) LoaderHeapEvent;
        }
        if (!pNewEvent)
        {
            if (!(pHeap->m_dwDebugFlags & pHeap->kEncounteredOOM))
            {
                pHeap->m_dwDebugFlags |= pHeap->kEncounteredOOM;
                _ASSERTE(!"LOADERHEAPSNIFFER: Failed allocation of LoaderHeapEvent. Call tracing information will be incomplete.");
            }
        }
        else
        {
            pNewEvent->m_allocationType     = allocationType;
            pNewEvent->m_szFile             = szFile;
            pNewEvent->m_lineNum            = lineNum;
            pNewEvent->m_szAllocFile        = szAllocFile;
            pNewEvent->m_allocLineNum       = allocLineNum;
            pNewEvent->m_pMem               = pMem;
            pNewEvent->m_dwRequestedSize    = dwRequestedSize;
            pNewEvent->m_dwSize             = dwSize;

            pNewEvent->m_pNext              = pHeap->m_pEventList;
            pHeap->m_pEventList             = pNewEvent;
        }
    }
}
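
// Note on the list structure: events are prepended, so pHeap->m_pEventList is
// a LIFO singly-linked list whose head is always the most recent event; a walk
// from the head therefore sees the newest records first.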


/*static*/
void LoaderHeapSniffer::ValidateFreeList(UnlockedLoaderHeap *pHeap)
{
    CANNOT_HAVE_CONTRACT;

    // No contract. This routine is only called if we've AV'd inside the
    // loaderheap. The system is already toast. We're trying to be a hero
    // and produce the best diagnostic info we can. Last thing we need here
    // is a secondary assert inside the contract stuff.
    //
    // This contract violation is permanent.
    CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation);  // This violation won't be removed

    LoaderHeapFreeBlock *pFree     = pHeap->m_pFirstFreeBlock;
    LoaderHeapFreeBlock *pPrev     = NULL;

    void                *pBadAddr = NULL;
    LoaderHeapFreeBlock *pProbeThis = NULL;
    const char          *pExpected = NULL;

    while (pFree != NULL)
    {
        if ( 0 != ( ((ULONG_PTR)pFree) & ALLOC_ALIGN_CONSTANT ))
        {
            // Not aligned - can't be a valid freeblock. Most likely we followed a bad pointer from the previous block.
            pProbeThis = pPrev;
            pBadAddr = pPrev ? &(pPrev->m_pNext) : &(pHeap->m_pFirstFreeBlock);
            pExpected = "a pointer to a valid LoaderHeapFreeBlock";
            break;
        }

        size_t dwSize = pFree->m_dwSize;
        if (dwSize < AllocMem_TotalSize(1, pHeap) ||
            0 != (dwSize & ALLOC_ALIGN_CONSTANT))
        {
            // Size is not a valid value (out of range or unaligned.)
            pProbeThis = pFree;
            pBadAddr = &(pFree->m_dwSize);
            pExpected = "a valid block size (multiple of pointer size)";
            break;
        }

        size_t i;
        for (i = sizeof(LoaderHeapFreeBlock); i < dwSize; i++)
        {
            if ( ((BYTE*)pFree)[i] != 0xcc )
            {
                pProbeThis = pFree;
                pBadAddr = i + ((BYTE*)pFree);
                pExpected = "0xcc (our fill value for free blocks)";
                break;
            }
        }
        if (i != dwSize)
        {
            break;
        }

        pPrev = pFree;
        pFree = pFree->m_pNext;
    }

    if (pFree == NULL)
    {
        return; // No problems found
    }

    {
        StackSString message;

        message.Printf("A loaderheap freelist has been corrupted. The bytes at or near address 0x%p appear to have been overwritten. We expected to see %s here.\n"
                       "\n"
                       "    LoaderHeap:                 0x%p\n"
                       "    Suspect address at:         0x%p\n"
                       "    Start of suspect freeblock: 0x%p\n"
                       "\n"
                       , pBadAddr
                       , pExpected
                       , pHeap
                       , pBadAddr
                       , pProbeThis
                       );

        if (!(pHeap->m_dwDebugFlags & pHeap->kCallTracing))
        {
            message.AppendASCII("\nThe usual reason is that someone wrote past the end of a block or wrote into a block after freeing it."
                           "\nOf course, the culprit is long gone so it's probably too late to debug this now. Try turning on call-tracing"
                           "\nand reproing. We can attempt to find out who last owned the surrounding pieces of memory."
                           "\n"
                           "\nTo turn on call-tracing, set the following registry DWORD value:"
                           "\n"
                           "\n    HKLM\\Software\\Microsoft\\.NETFramework\\LoaderHeapCallTracing = 1"
                           "\n"
                           );
        }
        else
        {
            LoaderHeapEvent *pBadAddrEvent = FindEvent(pHeap, pBadAddr);

            message.AppendASCII("*** CALL TRACING ENABLED ***\n\n");

            if (pBadAddrEvent)
            {
                message.AppendASCII("\nThe last known owner of the corrupted address was:\n");
                pBadAddrEvent->Describe(&message);
            }
            else
            {
                message.AppendASCII("\nNo known owner of the corrupted address.\n");
            }

            LoaderHeapEvent *pPrevEvent = FindEvent(pHeap, ((BYTE*)pProbeThis) - 1);

            int count = 3;
            while (count-- &&
                   pPrevEvent != NULL &&
                   ( ((UINT_PTR)pProbeThis) - ((UINT_PTR)(pPrevEvent->m_pMem)) + pPrevEvent->m_dwSize ) < 1024)
            {
                message.AppendASCII("\nThis block is located close to the corruption point. ");
                if (pPrevEvent->QuietValidate())
                {
                    message.AppendASCII("If it was overrun, it might have caused this.");
                }
                else
                {
                    message.AppendASCII("*** CORRUPTION DETECTED IN THIS BLOCK ***");
                }
                pPrevEvent->Describe(&message);
                pPrevEvent = FindEvent(pHeap, ((BYTE*)(pPrevEvent->m_pMem)) - 1);
            }
        }

        StackScratchBuffer scratch;
        DbgAssertDialog(__FILE__, __LINE__, (char*) message.GetANSI(scratch));
    }
}


BOOL LoaderHeapEvent::QuietValidate()
{
    WRAPPER_NO_CONTRACT;

    if (m_allocationType == kAllocMem)
    {
        LoaderHeapValidationTag *pTag = AllocMem_GetTag(m_pMem, m_dwRequestedSize);
        return (pTag->m_allocationType == m_allocationType && pTag->m_dwRequestedSize == m_dwRequestedSize);
    }
    else
    {
        // We can't easily validate freed blocks.
        return TRUE;
    }
}


#endif //_DEBUG

#ifndef DACCESS_COMPILE

AllocMemTracker::AllocMemTracker()
{
    CONTRACTL
    {
        NOTHROW;
        FORBID_FAULT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END

    m_FirstBlock.m_pNext    = NULL;
    m_FirstBlock.m_nextFree = 0;
    m_pFirstBlock = &m_FirstBlock;

    m_fReleased   = FALSE;
}

AllocMemTracker::~AllocMemTracker()
{
    CONTRACTL
    {
        NOTHROW;
        FORBID_FAULT;
    }
    CONTRACTL_END

    if (!m_fReleased)
    {
        AllocMemTrackerBlock *pBlock = m_pFirstBlock;
        while (pBlock)
        {
            // Do the loop in reverse - loaderheaps work best if
            // we allocate and backout in LIFO order.
            for (int i = pBlock->m_nextFree - 1; i >= 0; i--)
            {
                AllocMemTrackerNode *pNode = &(pBlock->m_Node[i]);
                pNode->m_pHeap->RealBackoutMem(pNode->m_pMem
                                               ,pNode->m_dwRequestedSize
#ifdef _DEBUG
                                               ,__FILE__
                                               ,__LINE__
                                               ,pNode->m_szAllocFile
                                               ,pNode->m_allocLineNum
#endif
                                              );
            }

            pBlock = pBlock->m_pNext;
        }
    }

    AllocMemTrackerBlock *pBlock = m_pFirstBlock;
    while (pBlock != &m_FirstBlock)
    {
        AllocMemTrackerBlock *pNext = pBlock->m_pNext;
        delete pBlock;
        pBlock = pNext;
    }

    INDEBUG(memset(this, 0xcc, sizeof(*this));)
}

void *AllocMemTracker::Track(TaggedMemAllocPtr tmap)
{
    CONTRACTL
    {
        THROWS;
        INJECT_FAULT(ThrowOutOfMemory(););
    }
    CONTRACTL_END

    _ASSERTE(this);

    void *pv = Track_NoThrow(tmap);
    if (!pv)
    {
        ThrowOutOfMemory();
    }
    return pv;
}

void *AllocMemTracker::Track_NoThrow(TaggedMemAllocPtr tmap)
{
    CONTRACTL
    {
        NOTHROW;
        INJECT_FAULT(return NULL;);
    }
    CONTRACTL_END

    _ASSERTE(this);

    // Calling Track() after calling SuppressRelease() is almost certainly a bug. You're supposed to call SuppressRelease() only after you're
    // sure no subsequent failure will force you to back out the memory.
    _ASSERTE( (!m_fReleased) && "You've already called SuppressRelease on this AllocMemTracker which implies you've passed your point of no failure. Why are you still doing allocations?");

    if (tmap.m_pMem != NULL)
    {
        AllocMemHolder<void*> holder(tmap);  // If anything goes wrong in here, this holder will backout the allocation for the caller.
        if (m_fReleased)
        {
            holder.SuppressRelease();
        }
        AllocMemTrackerBlock *pBlock = m_pFirstBlock;
        if (pBlock->m_nextFree == kAllocMemTrackerBlockSize)
        {
            AllocMemTrackerBlock *pNewBlock = new (nothrow) AllocMemTrackerBlock;
            if (!pNewBlock)
            {
                return NULL;
            }

            pNewBlock->m_pNext = m_pFirstBlock;
            pNewBlock->m_nextFree = 0;

            m_pFirstBlock = pNewBlock;

            pBlock = pNewBlock;
        }

        // From here on, we can't fail
        pBlock->m_Node[pBlock->m_nextFree].m_pHeap           = tmap.m_pHeap;
        pBlock->m_Node[pBlock->m_nextFree].m_pMem            = tmap.m_pMem;
        pBlock->m_Node[pBlock->m_nextFree].m_dwRequestedSize = tmap.m_dwRequestedSize;
#ifdef _DEBUG
        pBlock->m_Node[pBlock->m_nextFree].m_szAllocFile     = tmap.m_szFile;
        pBlock->m_Node[pBlock->m_nextFree].m_allocLineNum    = tmap.m_lineNum;
#endif

        pBlock->m_nextFree++;

        holder.SuppressRelease();
    }
    return (void *)tmap;
}


void AllocMemTracker::SuppressRelease()
{
    LIMITED_METHOD_CONTRACT;

    _ASSERTE(this);

    m_fReleased = TRUE;
}
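
// Typical usage, sketched (illustrative; assumes a heap whose AllocMem returns
// the TaggedMemAllocPtr that Track() expects, and a hypothetical size cbSize):
//
//   AllocMemTracker amTracker;
//   void *pMem = amTracker.Track(pHeap->AllocMem(S_SIZE_T(cbSize)));
//   // ... further allocations and initialization; a failure anywhere here
//   // lets ~AllocMemTracker back out every tracked allocation in LIFO order.
//   amTracker.SuppressRelease();  // past the point of no failure - keep the memory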

#endif //#ifndef DACCESS_COMPILE