2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef ExecutableAllocator_h
27 #define ExecutableAllocator_h
29 #include <stddef.h> // for ptrdiff_t
31 #include "assembler/wtf/Assertions.h"
38 #if WTF_PLATFORM_IPHONE
39 #include <libkern/OSCacheControl.h>
43 #if WTF_PLATFORM_SYMBIAN
47 #if WTF_CPU_MIPS && WTF_PLATFORM_LINUX
48 #include <sys/cachectl.h>
51 #if WTF_PLATFORM_WINCE
52 // From pkfuncs.h (private header file from the Platform Builder)
53 #define CACHE_SYNC_ALL 0x07F
54 extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
57 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
59 * On Windows, VirtualAlloc effectively allocates in 64K chunks. (Technically,
60 * it allocates in page chunks, but the starting address is always a multiple
61 * of 64K, so each allocation uses up 64K of address space.) So a size less
62 * than that would be pointless. But it turns out that 64KB is a reasonable
63 * size for all platforms.
65 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 16)
67 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
68 #define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
69 #define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
70 #define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
72 #define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
77 // Something included via windows.h defines a macro with this name,
78 // which causes the function below to fail to compile.
83 const size_t OVERSIZE_ALLOCATION = size_t(-1);
// Round `request` up to the next multiple of `granularity` (assumed to be a
// power of two, e.g. the page size). Returns OVERSIZE_ALLOCATION if the
// rounded size would overflow size_t.
85 inline size_t roundUpAllocationSize(size_t request, size_t granularity)
// Overflow guard: request + (granularity - 1) must not wrap around.
87 if ((std::numeric_limits<size_t>::max() - granularity) <= request)
88 return OVERSIZE_ALLOCATION;
90 // Round up to next page boundary
91 size_t size = request + (granularity - 1);
92 size = size & ~(granularity - 1);
// Sanity check: rounding up can never shrink the request.
93 JS_ASSERT(size >= request);
101 //#define DEBUG_STRESS_JSC_ALLOCATOR
105 // These are reference-counted. A new one (from the constructor or create)
106 // starts with a count of 1.
// A reference-counted pool of executable memory. A pool starts with a
// reference count of 1 (from the constructor or create()); the count is
// managed via the addRef/release members below.
107 class ExecutablePool {
112 #if WTF_PLATFORM_SYMBIAN
// The system allocations backing this pool (inline capacity of 2).
116 typedef js::Vector<Allocation, 2, js::SystemAllocPolicy> AllocationList;
118 // Reference count for automatic reclamation.
122 // It should be impossible for us to roll over, because only small
123 // pools have multiple holders, and they have one holder per chunk
124 // of generated code, and they only hold 16KB or so of code.
127 JS_ASSERT(m_refCount);
133 JS_ASSERT(m_refCount != 0);
// Destroy the pool when the last reference is dropped.
134 if (--m_refCount == 0)
// Create a pool sized for at least n bytes. Returns NULL on OOM: either
// the pool object itself could not be allocated, or the constructor's
// system allocation failed (signalled by a NULL m_freePtr).
138 static ExecutablePool* create(size_t n)
140 /* We can't (easily) use js_new() here because the constructor is private. */
141 void *memory = js_malloc(sizeof(ExecutablePool));
142 ExecutablePool *pool = memory ? new(memory) ExecutablePool(n) : NULL;
143 if (!pool || !pool->m_freePtr) {
// Bump-pointer allocation of n bytes from the current chunk; spills into
// poolAllocate() (a fresh system allocation) when the chunk is too small.
// Returns NULL on overflow or OOM.
150 void* alloc(size_t n)
152 JS_ASSERT(m_freePtr <= m_end);
154 // Round 'n' up to a multiple of word size; if all allocations are of
155 // word sized quantities, then all subsequent allocations will be aligned.
156 n = roundUpAllocationSize(n, sizeof(void*));
157 if (n == OVERSIZE_ALLOCATION)
160 if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
161 void* result = m_freePtr;
166 // Insufficient space to allocate in the existing pool
167 // so we need to allocate into a new pool
168 return poolAllocate(n);
// Return every system allocation owned by this pool to the OS.
173 Allocation* end = m_pools.end();
174 for (Allocation* ptr = m_pools.begin(); ptr != end; ++ptr)
175 ExecutablePool::systemRelease(*ptr);
// Bytes still free in the current chunk. Reported as 0 once the pool has
// spilled into more than one system allocation, so multi-chunk pools are
// never selected for further best-fit reuse.
178 size_t available() const { return (m_pools.length() > 1) ? 0 : m_end - m_freePtr; }
180 // Flag for downstream use, whether to try to release references to this pool.
183 // GC number in which the m_destroy flag was most recently set. Used downstream to
184 // remember whether m_destroy was computed for the currently active GC.
188 // On OOM, this will return an Allocation where pages is NULL.
189 static Allocation systemAlloc(size_t n);
190 static void systemRelease(const Allocation& alloc);
// Private: can fail due to OOM; callers must go through create().
192 ExecutablePool(size_t n);
194 void* poolAllocate(size_t n);
198 AllocationList m_pools;
// Hands out ExecutablePools and provides the platform-specific page
// protection (W^X) and instruction-cache maintenance primitives.
201 class ExecutableAllocator {
// NOTE(review): "ProtectionSeting" is a historical misspelling of
// "ProtectionSetting"; it is referenced by reprotectRegion() (defined
// out of line), so renaming must be coordinated with that definition.
202 enum ProtectionSeting { Writable, Executable };
204 // Initialization can fail so we use a create method instead.
205 ExecutableAllocator() {}
207 static size_t pageSize;
209 // Returns NULL on OOM.
210 static ExecutableAllocator *create()
212 /* We can't (easily) use js_new() here because the constructor is private. */
213 void *memory = js_malloc(sizeof(ExecutableAllocator));
214 ExecutableAllocator *allocator = memory ? new(memory) ExecutableAllocator() : NULL;
// Seed the allocator with one large pool; on failure, tear down the
// partially-constructed allocator and report OOM.
220 ExecutablePool *pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
222 js_delete(allocator);
225 JS_ASSERT(allocator->m_smallAllocationPools.empty());
226 allocator->m_smallAllocationPools.append(pool);
230 ~ExecutableAllocator()
232 for (size_t i = 0; i < m_smallAllocationPools.length(); i++)
233 js_delete(m_smallAllocationPools[i]);
236 // poolForSize returns reference-counted objects. The caller owns a reference
237 // to the object; i.e., poolForSize increments the count before returning the
240 ExecutablePool* poolForSize(size_t n)
242 #ifndef DEBUG_STRESS_JSC_ALLOCATOR
243 // Try to fit in an existing small allocator. Use the pool with the
244 // least available space that is big enough (best-fit). This is the
245 // best strategy because (a) it maximizes the chance of the next
246 // allocation fitting in a small pool, and (b) it minimizes the
247 // potential waste when a small pool is next abandoned.
248 ExecutablePool *minPool = NULL;
249 for (size_t i = 0; i < m_smallAllocationPools.length(); i++) {
250 ExecutablePool *pool = m_smallAllocationPools[i];
251 if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
260 // If the request is large, we just provide an unshared allocator
261 if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
262 return ExecutablePool::create(n);
264 // Create a new allocator
265 ExecutablePool* pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
268 // At this point, local |pool| is the owner.
270 if (m_smallAllocationPools.length() < maxSmallPools) {
271 // We haven't hit the maximum number of live pools; add the new pool.
272 m_smallAllocationPools.append(pool);
275 // Find the pool with the least space.
277 for (size_t i = 1; i < m_smallAllocationPools.length(); i++)
278 if (m_smallAllocationPools[i]->available() <
279 m_smallAllocationPools[iMin]->available())
284 // If the new allocator will result in more free space than the small
285 // pool with the least space, then we will use it instead
286 ExecutablePool *minPool = m_smallAllocationPools[iMin];
287 if ((pool->available() - n) > minPool->available()) {
289 m_smallAllocationPools[iMin] = pool;
294 // Pass ownership to the caller.
// W^X builds: flip page protection between RW and RX around code writes.
298 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
299 static void makeWritable(void* start, size_t size)
301 reprotectRegion(start, size, Writable);
304 static void makeExecutable(void* start, size_t size)
306 reprotectRegion(start, size, Executable);
// Non-W^X builds: pages stay RWX, so these are no-ops.
309 static void makeWritable(void*, size_t) {}
310 static void makeExecutable(void*, size_t) {}
// Platform-specific instruction-cache flush after writing code.
// x86/x86-64 has coherent instruction caches, so no flush is needed.
314 #if WTF_CPU_X86 || WTF_CPU_X86_64
315 static void cacheFlush(void*, size_t)
319 static void cacheFlush(void* code, size_t size)
321 #if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
322 #if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
// rdhwr $1 reads the MIPS SYNCI cache-line-size hardware register.
324 asm("rdhwr %0, $1" : "=r" (lineSize));
326 // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
327 // mips_expand_synci_loop that may execute synci one more time.
328 // "start" points to the first byte of the cache line.
329 // "end" points to the last byte of the line before the last cache line.
330 // Because size is always a multiple of 4, this is safe to set
331 // "end" to the last byte.
333 intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
334 intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
335 __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
337 intptr_t end = reinterpret_cast<intptr_t>(code) + size;
338 __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
341 _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
344 #elif WTF_CPU_ARM_THUMB2 && WTF_PLATFORM_IPHONE
345 static void cacheFlush(void* code, size_t size)
// Flush data cache then invalidate instruction cache over the range.
347 sys_dcache_flush(code, size);
348 sys_icache_invalidate(code, size);
350 #elif WTF_CPU_ARM_THUMB2 && WTF_PLATFORM_LINUX
351 static void cacheFlush(void* code, size_t size)
363 : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
366 #elif WTF_PLATFORM_SYMBIAN
367 static void cacheFlush(void* code, size_t size)
369 User::IMB_Range(code, static_cast<char*>(code) + size);
371 #elif WTF_CPU_ARM_TRADITIONAL && WTF_PLATFORM_LINUX && WTF_COMPILER_RVCT
// RVCT variant is written in assembly; defined out of line.
372 static __asm void cacheFlush(void* code, size_t size);
373 #elif WTF_CPU_ARM_TRADITIONAL && (WTF_PLATFORM_LINUX || WTF_PLATFORM_ANDROID) && WTF_COMPILER_GCC
374 static void cacheFlush(void* code, size_t size)
386 : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
389 #elif WTF_PLATFORM_WINCE
390 static void cacheFlush(void* code, size_t size)
392 CacheRangeFlush(code, size, CACHE_SYNC_ALL);
395 #error "The cacheFlush support is missing on this platform."
400 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
401 static void reprotectRegion(void*, size_t, ProtectionSeting);
// Cap on the number of small pools kept live for best-fit reuse.
404 static const size_t maxSmallPools = 4;
405 typedef js::Vector<ExecutablePool *, maxSmallPools, js::SystemAllocPolicy > SmallExecPoolVector;
406 SmallExecPoolVector m_smallAllocationPools;
// NOTE(review): "intializePageSize" is a historical misspelling of
// "initializePageSize"; the definition lives elsewhere, so renaming must
// be coordinated with it.
407 static void intializePageSize();
410 // This constructor can fail due to OOM. If it does, m_freePtr will be
// set such that create() detects the failure (see the NULL m_freePtr
// check there) and reclaims the partially-built pool.
412 inline ExecutablePool::ExecutablePool(size_t n) : m_refCount(1), m_destroy(false), m_gcNumber(0)
// Round the request up to whole pages; bail out on overflow.
414 size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
415 if (allocSize == OVERSIZE_ALLOCATION) {
419 #ifdef DEBUG_STRESS_JSC_ALLOCATOR
// Stress mode: deliberately request a huge (prime) size to exercise the
// OOM paths.
420 Allocation mem = systemAlloc(size_t(4294967291));
422 Allocation mem = systemAlloc(allocSize);
// Track the allocation so the destructor can release it.
428 if (!m_pools.append(mem)) {
// Initialize the bump-pointer range for alloc().
433 m_freePtr = mem.pages;
434 m_end = m_freePtr + allocSize;
// Slow path of alloc(): the current chunk is full, so grab a fresh
// page-rounded system allocation and carve the request out of it.
437 inline void* ExecutablePool::poolAllocate(size_t n)
439 size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
440 if (allocSize == OVERSIZE_ALLOCATION)
443 #ifdef DEBUG_STRESS_JSC_ALLOCATOR
// Stress mode: deliberately request a huge (prime) size to exercise the
// OOM paths.
444 Allocation result = systemAlloc(size_t(4294967291));
446 Allocation result = systemAlloc(allocSize);
451 JS_ASSERT(m_end >= m_freePtr);
// Adopt the new chunk as the bump-pointer range only if it leaves more
// free space behind than the chunk it would replace.
452 if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
453 // Replace allocation pool
454 m_freePtr = result.pages + n;
455 m_end = result.pages + allocSize;
// Track the allocation so the destructor can release it.
458 m_pools.append(result);
464 #endif // ENABLE(ASSEMBLER)
466 #endif // !defined(ExecutableAllocator_h)