// against the interface declaration.
public:
-// Allocate memory of the given size in bytes. All bytes of the returned block
-// must be initialized to zero. If `usePageAllocator` is true, the implementation
-// should use an allocator that deals in OS pages if one exists.
-void* allocateMemory(size_t size, bool usePageAllocator = false);
-
-// Frees memory previous obtained by a call to `ICorJitHost::allocateMemory`. The
-// value of the `usePageAllocator` parameter must match the value that was
-// provided to the call to used to allocate the memory.
-void freeMemory(void* block, bool usePageAllocator = false);
+// Allocate memory of the given size in bytes.
+void* allocateMemory(size_t size);
+
+// Frees memory previously obtained by a call to `ICorJitHost::allocateMemory`.
+void freeMemory(void* block);
// Return an integer config value for the given key, if any exists.
int getIntConfigValue(const wchar_t* name, int defaultValue);
this->mc = methodContext;
}
-void* JitHost::allocateMemory(size_t size, bool usePageAllocator)
+void* JitHost::allocateMemory(size_t size)
{
- return wrappedHost->allocateMemory(size, usePageAllocator);
+ return wrappedHost->allocateMemory(size);
}
-void JitHost::freeMemory(void* block, bool usePageAllocator)
+void JitHost::freeMemory(void* block)
{
- return wrappedHost->freeMemory(block, usePageAllocator);
+ return wrappedHost->freeMemory(block);
}
int JitHost::getIntConfigValue(const wchar_t* key, int defaultValue)
this->mcs = methodCallSummarizer;
}
-void* JitHost::allocateMemory(size_t size, bool usePageAllocator)
+void* JitHost::allocateMemory(size_t size)
{
- return wrappedHost->allocateMemory(size, usePageAllocator);
+ return wrappedHost->allocateMemory(size);
}
-void JitHost::freeMemory(void* block, bool usePageAllocator)
+void JitHost::freeMemory(void* block)
{
- return wrappedHost->freeMemory(block, usePageAllocator);
+ return wrappedHost->freeMemory(block);
}
int JitHost::getIntConfigValue(const wchar_t* key, int defaultValue)
{
}
-void* JitHost::allocateMemory(size_t size, bool usePageAllocator)
+void* JitHost::allocateMemory(size_t size)
{
- return wrappedHost->allocateMemory(size, usePageAllocator);
+ return wrappedHost->allocateMemory(size);
}
-void JitHost::freeMemory(void* block, bool usePageAllocator)
+void JitHost::freeMemory(void* block)
{
- return wrappedHost->freeMemory(block, usePageAllocator);
+ return wrappedHost->freeMemory(block);
}
int JitHost::getIntConfigValue(const wchar_t* key, int defaultValue)
{
}
-void* JitHost::allocateMemory(size_t size, bool usePageAllocator)
+void* JitHost::allocateMemory(size_t size)
{
return InitIEEMemoryManager(&jitInstance)->ClrVirtualAlloc(nullptr, size, 0, 0);
}
-void JitHost::freeMemory(void* block, bool usePageAllocator)
+void JitHost::freeMemory(void* block)
{
InitIEEMemoryManager(&jitInstance)->ClrVirtualFree(block, 0, 0);
}
IN CORINFO_METHOD_HANDLE hMethod,
OUT CORJIT_FLAGS *pFlags) = 0;
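+    // Returns the ICorJitHost instance for the JIT to use during compilation.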
+ virtual ICorJitHost* GetJitHost() = 0;
+
// needed for stubs to obtain the number of bytes to copy into the native image
// return the beginning of the stub and the size to copy (in bytes)
virtual void* GetStubSize(void *pStubAddress, DWORD *pSizeToCopy) = 0;
class ICorJitHost
{
public:
- // Allocate memory of the given size in bytes. All bytes of the returned block
- // must be initialized to zero. If `usePageAllocator` is true, the implementation
- // should use an allocator that deals in OS pages if one exists.
- virtual void* allocateMemory(size_t size, bool usePageAllocator = false) = 0;
+ // Allocate memory of the given size in bytes.
+ virtual void* allocateMemory(size_t size) = 0;
- // Frees memory previous obtained by a call to `ICorJitHost::allocateMemory`. The
- // value of the `usePageAllocator` parameter must match the value that was
- // provided to the call to used to allocate the memory.
- virtual void freeMemory(void* block, bool usePageAllocator = false) = 0;
+    // Frees memory previously obtained by a call to `ICorJitHost::allocateMemory`.
+ virtual void freeMemory(void* block) = 0;
// Return an integer config value for the given key, if any exists.
virtual int getIntConfigValue(
virtual void freeStringConfigValue(
const wchar_t* value
) = 0;
+
+    // Allocate a memory slab of the given size in bytes. The host is expected to pool
+    // these for good performance.
+ virtual void* allocateSlab(size_t size, size_t* pActualSize)
+ {
+ *pActualSize = size;
+ return allocateMemory(size);
+ }
+
+    // Free a memory slab of the given size in bytes.
+ virtual void freeSlab(void* slab, size_t actualSize)
+ {
+ freeMemory(slab);
+ }
};
#endif
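
With the `usePageAllocator` flag gone and the slab pair given forwarding defaults, the minimum a host must supply shrinks to the five pure-virtual methods. A hedged sketch of a minimal host (`TrivialJitHost` is hypothetical, `malloc`/`free` stand in for whatever heap a real host would use, and `corjit.h` is assumed to be on the include path):

    #include <cstdlib>
    #include "corjit.h" // declares ICorJitHost (assumed available)

    class TrivialJitHost : public ICorJitHost
    {
    public:
        // Plain allocation pair. The inherited allocateSlab/freeSlab defaults
        // forward here and report *pActualSize == size, so only hosts that
        // actually pool need to override the slab pair.
        virtual void* allocateMemory(size_t size) { return malloc(size); }
        virtual void freeMemory(void* block) { free(block); }

        // Minimal config surface: report that no configuration exists.
        virtual int getIntConfigValue(const wchar_t* name, int defaultValue) { return defaultValue; }
        virtual const wchar_t* getStringConfigValue(const wchar_t* name) { return nullptr; }
        virtual void freeStringConfigValue(const wchar_t* value) {}
    };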
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#ifndef __JITHOST_H__
-#define __JITHOST_H__
-
-// Common implementation of ICorJitHost that respects CLR host policies.
-class JitHost : public ICorJitHost
-{
-private:
- static JitHost theJitHost;
-
- JitHost() {}
- JitHost(const JitHost& other) = delete;
- JitHost& operator=(const JitHost& other) = delete;
-
-public:
- virtual void* allocateMemory(size_t size, bool usePageAllocator);
- virtual void freeMemory(void* block, bool usePageAllocator);
- virtual int getIntConfigValue(const wchar_t* name, int defaultValue);
- virtual const wchar_t* getStringConfigValue(const wchar_t* name);
- virtual void freeStringConfigValue(const wchar_t* value);
-
- static ICorJitHost* getJitHost();
-};
-
-#endif // __JITHOST_H__
#pragma hdrstop
#endif // defined(_MSC_VER)
-//------------------------------------------------------------------------
-// SinglePagePool: Manage a single, default-sized page pool for ArenaAllocator.
-//
-// Allocating a page is slightly costly as it involves the JIT host and
-// possibly the operating system as well. This pool avoids allocation
-// in many cases (i.e. for all non-concurrent method compilations).
-//
-class ArenaAllocator::SinglePagePool
-{
- // The page maintained by this pool
- PageDescriptor* m_page;
- // The page available for allocation (either m_page or &m_shutdownPage if shutdown was called)
- PageDescriptor* m_availablePage;
- // A dummy page that is made available during shutdown
- PageDescriptor m_shutdownPage;
-
-public:
- // Attempt to acquire the page managed by this pool.
- PageDescriptor* tryAcquirePage(IEEMemoryManager* memoryManager)
- {
- assert(memoryManager != nullptr);
-
- PageDescriptor* page = InterlockedExchangeT(&m_availablePage, nullptr);
- if ((page != nullptr) && (page->m_memoryManager != memoryManager))
- {
- // The pool page belongs to a different memory manager, release it.
- releasePage(page, page->m_memoryManager);
- page = nullptr;
- }
-
- assert((page == nullptr) || isPoolPage(page));
-
- return page;
- }
-
- // Attempt to pool the specified page.
- void tryPoolPage(PageDescriptor* page)
- {
- assert(page != &m_shutdownPage);
-
- // Try to pool this page, give up if another thread has already pooled a page.
- InterlockedCompareExchangeT(&m_page, page, nullptr);
- }
-
- // Check if a page is pooled.
- bool isEmpty()
- {
- return (m_page == nullptr);
- }
-
- // Check if the specified page is pooled.
- bool isPoolPage(PageDescriptor* page)
- {
- return (m_page == page);
- }
-
- // Release the specified page.
- PageDescriptor* releasePage(PageDescriptor* page, IEEMemoryManager* memoryManager)
- {
- // tryAcquirePage may end up releasing the shutdown page if shutdown was called.
- assert((page == &m_shutdownPage) || isPoolPage(page));
- assert((page == &m_shutdownPage) || (memoryManager != nullptr));
-
- // Normally m_availablePage should be null when releasePage is called but it can
- // be the shutdown page if shutdown is called while the pool page is in use.
- assert((m_availablePage == nullptr) || (m_availablePage == &m_shutdownPage));
-
- PageDescriptor* next = page->m_next;
- // Update the page's memory manager (replaces m_next that's not needed in this state).
- page->m_memoryManager = memoryManager;
- // Try to make the page available. This will fail if the pool was shutdown
- // and then we need to free the page here.
- PageDescriptor* shutdownPage = InterlockedCompareExchangeT(&m_availablePage, page, nullptr);
- if (shutdownPage != nullptr)
- {
- assert(shutdownPage == &m_shutdownPage);
- freeHostMemory(memoryManager, page);
- }
-
- // Return the next page for caller's convenience.
- return next;
- }
-
- // Free the pooled page.
- void shutdown()
- {
- // If the pool page is available then acquire it now so it can be freed.
- // Also make the shutdown page available so that:
- // - tryAcquirePage won't be return it because it has a null memory manager
- // - releasePage won't be able to make the pool page available and instead will free it
- PageDescriptor* page = InterlockedExchangeT(&m_availablePage, &m_shutdownPage);
-
- assert(page != &m_shutdownPage);
- assert((page == nullptr) || isPoolPage(page));
-
- if ((page != nullptr) && (page->m_memoryManager != nullptr))
- {
- freeHostMemory(page->m_memoryManager, page);
- }
- }
-};
-
-ArenaAllocator::SinglePagePool ArenaAllocator::s_pagePool = {};
-
//------------------------------------------------------------------------
// ArenaAllocator::bypassHostAllocator:
// Indicates whether or not the ArenaAllocator should bypass the JIT
// ArenaAllocator::ArenaAllocator:
// Default-constructs an arena allocator.
ArenaAllocator::ArenaAllocator()
- : m_memoryManager(nullptr)
- , m_firstPage(nullptr)
- , m_lastPage(nullptr)
- , m_nextFreeByte(nullptr)
- , m_lastFreeByte(nullptr)
+ : m_firstPage(nullptr), m_lastPage(nullptr), m_nextFreeByte(nullptr), m_lastFreeByte(nullptr)
{
- assert(!isInitialized());
-}
-
-//------------------------------------------------------------------------
-// ArenaAllocator::initialize:
-// Initializes the arena allocator.
-//
-// Arguments:
-// memoryManager - The `IEEMemoryManager` instance that will be used to
-// allocate memory for arena pages.
-void ArenaAllocator::initialize(IEEMemoryManager* memoryManager)
-{
- assert(!isInitialized());
- m_memoryManager = memoryManager;
- assert(isInitialized());
-
#if MEASURE_MEM_ALLOC
memset(&m_stats, 0, sizeof(m_stats));
memset(&m_statsAllocators, 0, sizeof(m_statsAllocators));
#endif // MEASURE_MEM_ALLOC
}
-bool ArenaAllocator::isInitialized()
-{
- return m_memoryManager != nullptr;
-}
-
//------------------------------------------------------------------------
// ArenaAllocator::allocateNewPage:
// Allocates a new arena page.
// A pointer to the first usable byte of the newly allocated page.
void* ArenaAllocator::allocateNewPage(size_t size)
{
- assert(isInitialized());
-
size_t pageSize = sizeof(PageDescriptor) + size;
// Check for integer overflow
m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
}
- PageDescriptor* newPage = nullptr;
- bool tryPoolNewPage = false;
+ PageDescriptor* newPage = nullptr;
if (!bypassHostAllocator())
{
- // Round to the nearest multiple of OS page size
+        // Round up to the nearest multiple of the default page size
pageSize = roundUp(pageSize, DEFAULT_PAGE_SIZE);
-
- // If this is the first time we allocate a page then try to use the pool page.
- if ((m_firstPage == nullptr) && (pageSize == DEFAULT_PAGE_SIZE))
- {
- newPage = s_pagePool.tryAcquirePage(m_memoryManager);
-
- if (newPage == nullptr)
- {
- // If there's no pool page yet then try to pool the newly allocated page.
- tryPoolNewPage = s_pagePool.isEmpty();
- }
- else
- {
- assert(newPage->m_memoryManager == m_memoryManager);
- assert(newPage->m_pageBytes == DEFAULT_PAGE_SIZE);
- }
- }
}
if (newPage == nullptr)
{
// Allocate the new page
- newPage = static_cast<PageDescriptor*>(allocateHostMemory(m_memoryManager, pageSize));
+ newPage = static_cast<PageDescriptor*>(allocateHostMemory(pageSize, &pageSize));
if (newPage == nullptr)
{
NOMEM();
}
-
- if (tryPoolNewPage)
- {
- s_pagePool.tryPoolPage(newPage);
- }
}
// Append the new page to the end of the list
// Performs any necessary teardown for an `ArenaAllocator`.
void ArenaAllocator::destroy()
{
- assert(isInitialized());
-
PageDescriptor* page = m_firstPage;
- // If the first page is the pool page then return it to the pool.
- if ((page != nullptr) && s_pagePool.isPoolPage(page))
- {
- page = s_pagePool.releasePage(page, m_memoryManager);
- }
-
// Free all of the allocated pages
for (PageDescriptor* next; page != nullptr; page = next)
{
- assert(!s_pagePool.isPoolPage(page));
next = page->m_next;
- freeHostMemory(m_memoryManager, page);
+ freeHostMemory(page, page->m_pageBytes);
}
// Clear out the allocator's fields
- m_memoryManager = nullptr;
- m_firstPage = nullptr;
- m_lastPage = nullptr;
- m_nextFreeByte = nullptr;
- m_lastFreeByte = nullptr;
- assert(!isInitialized());
+ m_firstPage = nullptr;
+ m_lastPage = nullptr;
+ m_nextFreeByte = nullptr;
+ m_lastFreeByte = nullptr;
}
// The debug version of the allocator may allocate directly from the
//
// Arguments:
// size - The number of bytes to allocate.
+//    pActualSize - The number of bytes actually allocated.
//
// Return Value:
// A pointer to the allocated memory.
-void* ArenaAllocator::allocateHostMemory(IEEMemoryManager* memoryManager, size_t size)
+void* ArenaAllocator::allocateHostMemory(size_t size, size_t* pActualSize)
{
- assert(memoryManager != nullptr);
-
#if defined(DEBUG)
if (bypassHostAllocator())
{
+ *pActualSize = size;
return ::HeapAlloc(GetProcessHeap(), 0, size);
}
- else
- {
- return ClrAllocInProcessHeap(0, S_SIZE_T(size));
- }
-#else // defined(DEBUG)
- return memoryManager->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
#endif // !defined(DEBUG)
+
+ return g_jitHost->allocateSlab(size, pActualSize);
}
//------------------------------------------------------------------------
//
// Arguments:
// block - A pointer to the memory to free.
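+//    size  - The size of the block, as previously returned by `allocateHostMemory` in `pActualSize`.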
-void ArenaAllocator::freeHostMemory(IEEMemoryManager* memoryManager, void* block)
+void ArenaAllocator::freeHostMemory(void* block, size_t size)
{
- assert(memoryManager != nullptr);
-
#if defined(DEBUG)
if (bypassHostAllocator())
{
::HeapFree(GetProcessHeap(), 0, block);
+ return;
}
- else
- {
- ClrFreeInProcessHeap(0, block);
- }
-#else // defined(DEBUG)
- memoryManager->ClrVirtualFree(block, 0, MEM_RELEASE);
#endif // !defined(DEBUG)
+
+ g_jitHost->freeSlab(block, size);
}
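
The contract between the arena and the host is now: the requested size goes in, the size actually granted comes back through `pActualSize`, and that same value must be echoed to `freeHostMemory` (the arena stashes it in `m_pageBytes`). A standalone sketch of the pattern, with hypothetical `host_alloc`/`host_free` standing in for the two functions above:

    #include <cstdlib>

    // A pooling host may hand back a bigger block than requested.
    static void* host_alloc(size_t size, size_t* pActualSize)
    {
        *pActualSize = size; // a pooling implementation may round this up
        return malloc(size);
    }

    // The host keys its cache on the actual size, so callers must echo it back.
    static void host_free(void* block, size_t actualSize)
    {
        (void)actualSize;
        free(block);
    }

    int main()
    {
        size_t pageBytes;
        void* page = host_alloc(0x10000, &pageBytes); // one default-sized page
        // ... record pageBytes the way m_pageBytes is recorded above ...
        host_free(page, pageBytes);                   // return the actual size
        return 0;
    }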
//------------------------------------------------------------------------
// See above.
size_t ArenaAllocator::getTotalBytesAllocated()
{
- assert(isInitialized());
-
size_t bytes = 0;
for (PageDescriptor* page = m_firstPage; page != nullptr; page = page->m_next)
{
// that are unused across all area pages.
size_t ArenaAllocator::getTotalBytesUsed()
{
- assert(isInitialized());
-
if (m_lastPage != nullptr)
{
m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
return bytes;
}
-//------------------------------------------------------------------------
-// ArenaAllocator::shutdown:
-// Performs any necessary teardown for the arena allocator subsystem.
-void ArenaAllocator::shutdown()
-{
- s_pagePool.shutdown();
-}
-
#if MEASURE_MEM_ALLOC
CritSecObject ArenaAllocator::s_statsLock;
ArenaAllocator::AggregateMemStats ArenaAllocator::s_aggStats;
struct PageDescriptor
{
- union {
- // Used when the page is allocated
- PageDescriptor* m_next;
- // Used by the pooled page when available
- IEEMemoryManager* m_memoryManager;
- };
+ PageDescriptor* m_next;
size_t m_pageBytes; // # of bytes allocated
size_t m_usedBytes; // # of bytes actually used. (This is only valid when we've allocated a new page.)
BYTE m_contents[];
};
- // Anything less than 64K leaves VM holes since the OS allocates address space in this size.
- // Thus if we want to make this smaller, we need to do a reserve / commit scheme
enum
{
- DEFAULT_PAGE_SIZE = 16 * OS_page_size,
+ DEFAULT_PAGE_SIZE = 0x10000,
};
- class SinglePagePool;
-
- static SinglePagePool s_pagePool;
-
- IEEMemoryManager* m_memoryManager;
-
PageDescriptor* m_firstPage;
PageDescriptor* m_lastPage;
BYTE* m_nextFreeByte;
BYTE* m_lastFreeByte;
- bool isInitialized();
-
void* allocateNewPage(size_t size);
- static void* allocateHostMemory(IEEMemoryManager* memoryManager, size_t size);
- static void freeHostMemory(IEEMemoryManager* memoryManager, void* block);
+ static void* allocateHostMemory(size_t size, size_t* pActualSize);
+ static void freeHostMemory(void* block, size_t size);
#if MEASURE_MEM_ALLOC
struct MemStats
public:
ArenaAllocator();
- void initialize(IEEMemoryManager* memoryManager);
-
// NOTE: it would be nice to have a destructor on this type to ensure that any value that
// goes out of scope is either uninitialized or has been torn down via a call to
// destroy(), but this interacts badly in methods that use SEH. #3058 tracks
static bool bypassHostAllocator();
static size_t getDefaultPageSize();
-
- static void shutdown();
};
//------------------------------------------------------------------------
//
inline void* ArenaAllocator::allocateMemory(size_t size)
{
- assert(isInitialized());
assert(size != 0);
// Ensure that we always allocate in pointer sized increments.
DisplayNowayAssertMap();
#endif // MEASURE_NOWAY
- ArenaAllocator::shutdown();
-
/* Shut down the emitter */
emitter::emitDone();
}
else
{
- alloc.initialize(compHnd->getMemoryManager());
pAlloc = &alloc;
}
#define _HOST_H_
/*****************************************************************************/
-const size_t OS_page_size = (4 * 1024);
-
extern FILE* jitstdout;
inline FILE* procstdout()
void* HostAllocator::allocateHostMemory(size_t size)
{
assert(g_jitHost != nullptr);
- return g_jitHost->allocateMemory(size, false);
+ return g_jitHost->allocateMemory(size);
}
void HostAllocator::freeHostMemory(void* p)
{
assert(g_jitHost != nullptr);
- g_jitHost->freeMemory(p, false);
+ g_jitHost->freeMemory(p);
}
debug.cpp
pedecoder.cpp
winfix.cpp
- longfilepathwrappers.cpp
- jithost.cpp
+ longfilepathwrappers.cpp
)
# These source file do not yet compile on Linux.
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#include "stdafx.h"
-
-#include "utilcode.h"
-#include "corjit.h"
-#include "jithost.h"
-
-void* JitHost::allocateMemory(size_t size, bool usePageAllocator)
-{
- WRAPPER_NO_CONTRACT;
-
- if (usePageAllocator)
- {
- return GetEEMemoryManager()->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
- }
- else
- {
- return ClrAllocInProcessHeap(0, S_SIZE_T(size));
- }
-}
-
-void JitHost::freeMemory(void* block, bool usePageAllocator)
-{
- WRAPPER_NO_CONTRACT;
-
- if (usePageAllocator)
- {
- GetEEMemoryManager()->ClrVirtualFree(block, 0, MEM_RELEASE);
- }
- else
- {
- ClrFreeInProcessHeap(0, block);
- }
-}
-
-int JitHost::getIntConfigValue(const wchar_t* name, int defaultValue)
-{
- WRAPPER_NO_CONTRACT;
-
- // Translate JIT call into runtime configuration query
- CLRConfig::ConfigDWORDInfo info{ name, defaultValue, CLRConfig::EEConfig_default };
-
- // Perform a CLRConfig look up on behalf of the JIT.
- return CLRConfig::GetConfigValue(info);
-}
-
-const wchar_t* JitHost::getStringConfigValue(const wchar_t* name)
-{
- WRAPPER_NO_CONTRACT;
-
- // Translate JIT call into runtime configuration query
- CLRConfig::ConfigStringInfo info{ name, CLRConfig::EEConfig_default };
-
- // Perform a CLRConfig look up on behalf of the JIT.
- return CLRConfig::GetConfigValue(info);
-}
-
-void JitHost::freeStringConfigValue(const wchar_t* value)
-{
- WRAPPER_NO_CONTRACT;
-
- CLRConfig::FreeConfigString(const_cast<wchar_t*>(value));
-}
-
-JitHost JitHost::theJitHost;
-ICorJitHost* JitHost::getJitHost()
-{
- STATIC_CONTRACT_SO_TOLERANT;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_CANNOT_TAKE_LOCK;
-
- return &theJitHost;
-}
ilstubresolver.cpp
inlinetracking.cpp
instmethhash.cpp
+ jithost.cpp
jitinterface.cpp
loaderallocator.cpp
memberload.cpp
#include "finalizerthread.h"
#include "threadsuspend.h"
#include "disassembler.h"
+#include "jithost.h"
#ifndef FEATURE_PAL
#include "dwreport.h"
ExecutionManager::Init();
+ JitHost::Init();
+
#ifndef CROSSGEN_COMPILE
#ifndef FEATURE_PAL
#include "versionresilienthashcode.h"
#include "inlinetracking.h"
+#include "jithost.h"
#ifdef CROSSGEN_COMPILE
CompilationDomain * theDomain;
CompressDebugInfo::CompressBoundariesAndVars(pOffsetMapping, iOffsetMapping, pNativeVarInfo, iNativeVarInfo, pDebugInfoBuffer, NULL);
}
+ICorJitHost* CEECompileInfo::GetJitHost()
+{
+ return JitHost::getJitHost();
+}
+
HRESULT CEECompileInfo::GetBaseJitFlags(
IN CORINFO_METHOD_HANDLE hMethod,
OUT CORJIT_FLAGS *pFlags)
IN CORINFO_METHOD_HANDLE hMethod,
OUT CORJIT_FLAGS *pFlags);
+ ICorJitHost* GetJitHost();
+
void* GetStubSize(void *pStubAddress, DWORD *pSizeToCopy);
HRESULT GetStubClone(void *pStub, BYTE *pBuffer, DWORD dwBufferSize);
../invokeutil.cpp
../inlinetracking.cpp
../contractimpl.cpp
+ ../jithost.cpp
../jitinterface.cpp
../loaderallocator.cpp
../memberload.cpp
#include "finalizerthread.h"
#include "threadsuspend.h"
+#include "jithost.h"
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#endif
-
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
#include "profattach.h"
#endif // FEATURE_PROFAPI_ATTACH_DETACH
bPriorityBoosted = TRUE;
}
+ JitHost::Reclaim();
+
GetFinalizerThread()->DisablePreemptiveGC();
#ifdef _DEBUG
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+
+#include "utilcode.h"
+#include "corjit.h"
+#include "jithost.h"
+
+void* JitHost::allocateMemory(size_t size)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return ClrAllocInProcessHeap(0, S_SIZE_T(size));
+}
+
+void JitHost::freeMemory(void* block)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ClrFreeInProcessHeap(0, block);
+}
+
+int JitHost::getIntConfigValue(const wchar_t* name, int defaultValue)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Translate JIT call into runtime configuration query
+ CLRConfig::ConfigDWORDInfo info{ name, defaultValue, CLRConfig::EEConfig_default };
+
+ // Perform a CLRConfig look up on behalf of the JIT.
+ return CLRConfig::GetConfigValue(info);
+}
+
+const wchar_t* JitHost::getStringConfigValue(const wchar_t* name)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Translate JIT call into runtime configuration query
+ CLRConfig::ConfigStringInfo info{ name, CLRConfig::EEConfig_default };
+
+ // Perform a CLRConfig look up on behalf of the JIT.
+ return CLRConfig::GetConfigValue(info);
+}
+
+void JitHost::freeStringConfigValue(const wchar_t* value)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CLRConfig::FreeConfigString(const_cast<wchar_t*>(value));
+}
+
+//
+// Pool memory blocks for the JIT to avoid frequent commit/decommit. Frequent commit/decommit has been
+// shown to slow down the JIT significantly (10% or more). The memory blocks used by the JIT tend to be
+// too big to be covered by the pooling done by the default malloc.
+//
+// - Keep up to a fixed limit's worth of memory, with loose affinization of memory blocks to threads.
+// - On the finalizer thread, release the extra memory that was not used recently.
+//
+
+void* JitHost::allocateSlab(size_t size, size_t* pActualSize)
+{
+ size = max(size, sizeof(Slab));
+
+ Thread* pCurrentThread = GetThread();
+ if (m_pCurrentCachedList != NULL || m_pPreviousCachedList != NULL)
+ {
+ CrstHolder lock(&m_jitSlabAllocatorCrst);
+ Slab** ppCandidate = NULL;
+
+ for (Slab ** ppList = &m_pCurrentCachedList; *ppList != NULL; ppList = &(*ppList)->pNext)
+ {
+ Slab* p = *ppList;
+ if (p->size >= size && p->size <= 4 * size) // Avoid wasting more than 4x memory
+ {
+ ppCandidate = ppList;
+ if (p->affinity == pCurrentThread)
+ break;
+ }
+ }
+
+ if (ppCandidate == NULL)
+ {
+ for (Slab ** ppList = &m_pPreviousCachedList; *ppList != NULL; ppList = &(*ppList)->pNext)
+ {
+ Slab* p = *ppList;
+ if (p->size == size) // Allocation from previous list requires exact match
+ {
+ ppCandidate = ppList;
+ if (p->affinity == pCurrentThread)
+ break;
+ }
+ }
+ }
+
+ if (ppCandidate != NULL)
+ {
+ Slab* p = *ppCandidate;
+ *ppCandidate = p->pNext;
+
+ m_totalCached -= p->size;
+ *pActualSize = p->size;
+
+ return p;
+ }
+ }
+
+ *pActualSize = size;
+ return ClrAllocInProcessHeap(0, S_SIZE_T(size));
+}
+
+void JitHost::freeSlab(void* slab, size_t actualSize)
+{
+ _ASSERTE(actualSize >= sizeof(Slab));
+
+ if (actualSize < 0x100000) // Do not cache blocks that are more than 1MB
+ {
+ CrstHolder lock(&m_jitSlabAllocatorCrst);
+
+ if (m_totalCached < 0x1000000) // Do not cache more than 16MB
+ {
+ m_totalCached += actualSize;
+
+ Slab* pSlab = (Slab*)slab;
+ pSlab->size = actualSize;
+ pSlab->affinity = GetThread();
+ pSlab->pNext = m_pCurrentCachedList;
+ m_pCurrentCachedList = pSlab;
+ return;
+ }
+ }
+
+ ClrFreeInProcessHeap(0, slab);
+}
+
+void JitHost::init()
+{
+ m_jitSlabAllocatorCrst.Init(CrstLeafLock);
+}
+
+void JitHost::reclaim()
+{
+ if (m_pCurrentCachedList != NULL || m_pPreviousCachedList != NULL)
+ {
+ DWORD ticks = ::GetTickCount();
+
+        if (m_lastFlush == 0) // Just update m_lastFlush the first time around
+ {
+ m_lastFlush = ticks;
+ return;
+ }
+
+ if ((DWORD)(ticks - m_lastFlush) < 2000) // Flush the free lists every 2 seconds
+ return;
+ m_lastFlush = ticks;
+
+ // Flush all slabs in m_pPreviousCachedList
+ for (;;)
+ {
+ Slab* slabToDelete = NULL;
+
+ {
+ CrstHolder lock(&m_jitSlabAllocatorCrst);
+ slabToDelete = m_pPreviousCachedList;
+ if (slabToDelete == NULL)
+ {
+ m_pPreviousCachedList = m_pCurrentCachedList;
+ m_pCurrentCachedList = NULL;
+ break;
+ }
+ m_totalCached -= slabToDelete->size;
+ m_pPreviousCachedList = slabToDelete->pNext;
+ }
+
+ ClrFreeInProcessHeap(0, slabToDelete);
+ }
+ }
+}
+
+JitHost JitHost::s_theJitHost;
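
The cache above is generational: frees land on `m_pCurrentCachedList`, and each flush interval `reclaim` frees whatever sat out a full interval on `m_pPreviousCachedList` before demoting the current list. A hedged, standalone analogue of that scheme (`SlabCache` is hypothetical; `std::mutex` and `malloc`/`free` stand in for the Crst and the process heap, and the thread-affinity preference and the 1MB/16MB caps are omitted for brevity):

    #include <cstdlib>
    #include <mutex>

    struct Slab { Slab* pNext; size_t size; };

    class SlabCache
    {
        std::mutex m_lock;
        Slab*  m_pCurrent  = nullptr; // freed since the last sweep
        Slab*  m_pPrevious = nullptr; // survived one full sweep interval
        size_t m_totalCached = 0;

    public:
        void* allocate(size_t size, size_t* pActualSize)
        {
            if (size < sizeof(Slab))
                size = sizeof(Slab); // every cached block must fit a Slab header

            {
                std::lock_guard<std::mutex> hold(m_lock);
                // First fit on the current generation, tolerating up to 4x
                // internal waste (the real code also searches the previous
                // generation, but only for an exact size match).
                for (Slab** pp = &m_pCurrent; *pp != nullptr; pp = &(*pp)->pNext)
                {
                    Slab* p = *pp;
                    if (p->size >= size && p->size <= 4 * size)
                    {
                        *pp = p->pNext;
                        m_totalCached -= p->size;
                        *pActualSize = p->size;
                        return p; // the Slab header memory is reused as the block
                    }
                }
            }

            *pActualSize = size;
            return malloc(size);
        }

        void release(void* block, size_t actualSize)
        {
            std::lock_guard<std::mutex> hold(m_lock);
            Slab* p = static_cast<Slab*>(block);
            p->size  = actualSize;
            p->pNext = m_pCurrent; // new frees always join the current generation
            m_pCurrent = p;
            m_totalCached += actualSize;
        }

        // Called periodically (the finalizer thread in the change above).
        void sweep()
        {
            std::lock_guard<std::mutex> hold(m_lock);
            while (m_pPrevious != nullptr) // free the stale generation
            {
                Slab* next = m_pPrevious->pNext;
                m_totalCached -= m_pPrevious->size;
                free(m_pPrevious);
                m_pPrevious = next;
            }
            m_pPrevious = m_pCurrent; // demote the current generation
            m_pCurrent  = nullptr;
        }
    };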
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __JITHOST_H__
+#define __JITHOST_H__
+
+// Common implementation of ICorJitHost that respects CLR host policies.
+class JitHost : public ICorJitHost
+{
+private:
+ static JitHost s_theJitHost;
+
+ struct Slab
+ {
+ Slab * pNext;
+ size_t size;
+ Thread* affinity;
+ };
+
+ CrstStatic m_jitSlabAllocatorCrst;
+ Slab* m_pCurrentCachedList;
+ Slab* m_pPreviousCachedList;
+ size_t m_totalCached;
+ DWORD m_lastFlush;
+
+ JitHost() {}
+ JitHost(const JitHost& other) = delete;
+ JitHost& operator=(const JitHost& other) = delete;
+
+ void init();
+ void reclaim();
+
+public:
+ virtual void* allocateMemory(size_t size);
+ virtual void freeMemory(void* block);
+ virtual int getIntConfigValue(const wchar_t* name, int defaultValue);
+ virtual const wchar_t* getStringConfigValue(const wchar_t* name);
+ virtual void freeStringConfigValue(const wchar_t* value);
+ virtual void* allocateSlab(size_t size, size_t* pActualSize);
+ virtual void freeSlab(void* slab, size_t actualSize);
+
+ static void Init() { s_theJitHost.init(); }
+ static void Reclaim() { s_theJitHost.reclaim(); }
+
+ static ICorJitHost* getJitHost() { return &s_theJitHost; }
+};
+
+#endif // __JITHOST_H__
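
Pulling the pieces together, the intended lifecycle of the static surface is sketched below. This is a hypothetical driver: in the actual change, `Init` is called from EEStartup, `Reclaim` from the finalizer thread, and `getJitHost` from the JIT/zapper startup paths shown further down.

    #include "jithost.h" // assumed available alongside the CLR headers

    void HostLifecycleSketch()
    {
        JitHost::Init();                           // once, at EE startup
        ICorJitHost* host = JitHost::getJitHost(); // handed to jitStartup

        size_t actual;
        void* slab = host->allocateSlab(0x1000, &actual); // JIT thread
        host->freeSlab(slab, actual);                     // parks on the current list

        JitHost::Reclaim();                        // finalizer thread, every ~2s
    }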
#include "utilcode.h"
#include "corjit.h"
-#include "jithost.h"
#include "corcompile.h"
#include "iceefilegen.h"
#include "corpriv.h"
pJitStartup jitStartupFn = (pJitStartup)GetProcAddress(*phJit, "jitStartup");
if (jitStartupFn != nullptr)
{
- jitStartupFn(JitHost::getJitHost());
+ jitStartupFn(m_pEECompileInfo->GetJitHost());
}
//get the appropriate compiler interface
//
#ifdef FEATURE_MERGE_JIT_AND_ENGINE
- jitStartup(JitHost::getJitHost());
+ jitStartup(m_pEECompileInfo->GetJitHost());
m_pJitCompiler = getJit();
if (m_pJitCompiler == NULL)