/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ***** END LICENSE BLOCK ***** */

#include "jstypes.h"
#include "jsstdint.h"
#include "jsgcchunk.h"

#ifdef XP_WIN
# include "jswin.h"

# ifdef _MSC_VER
#  pragma warning( disable: 4267 4996 4146 )
# endif

#elif defined(XP_OS2)
# define INCL_DOSMEMMGR
# include <os2.h>

#elif defined(XP_MACOSX) || defined(DARWIN)

# include <libkern/OSAtomic.h>
# include <mach/mach_error.h>
# include <mach/mach_init.h>
# include <mach/vm_map.h>
# include <malloc/malloc.h>

#elif defined(XP_UNIX) || defined(XP_BEOS)

# include <unistd.h>
# include <sys/mman.h>

#endif

#ifdef XP_WIN

/*
 * On Windows CE < 6 we must use separate MEM_RESERVE and MEM_COMMIT
 * VirtualAlloc calls and we cannot use MEM_RESERVE to allocate at a given
 * address. So we use a workaround based on oversized allocation.
 */
# if defined(WINCE) && !defined(MOZ_MEMORY_WINCE6)

#  define JS_GC_HAS_MAP_ALIGN

static void
UnmapPagesAtBase(void *p)
{
    JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}

static void *
MapAlignedPages(size_t size, size_t alignment)
{
    JS_ASSERT(size % alignment == 0);
    JS_ASSERT(size >= alignment);

    void *reserve = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
    if (!reserve)
        return NULL;

    void *p = VirtualAlloc(reserve, size, MEM_COMMIT, PAGE_READWRITE);
    JS_ASSERT(p == reserve);

    size_t mask = alignment - 1;
    size_t offset = (uintptr_t) p & mask;
    if (!offset)
        return p;

    /* Try to extend the initial allocation. */
    UnmapPagesAtBase(reserve);
    reserve = VirtualAlloc(NULL, size + alignment - offset, MEM_RESERVE,
                           PAGE_NOACCESS);
    if (!reserve)
        return NULL;
    if (offset == ((uintptr_t) reserve & mask)) {
        void *aligned = (void *) ((uintptr_t) reserve + alignment - offset);
        p = VirtualAlloc(aligned, size, MEM_COMMIT, PAGE_READWRITE);
        JS_ASSERT(p == aligned);
        return p;
    }

    /* Over-allocate to ensure that we get an aligned region. */
    UnmapPagesAtBase(reserve);
    reserve = VirtualAlloc(NULL, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!reserve)
        return NULL;

    offset = (uintptr_t) reserve & mask;
    void *aligned = (void *) ((uintptr_t) reserve + alignment - offset);
    p = VirtualAlloc(aligned, size, MEM_COMMIT, PAGE_READWRITE);
    JS_ASSERT(p == aligned);

    return p;
}
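
/*
 * Worked example of the strategy above, using hypothetical numbers (not
 * taken from any real trace): for size = alignment = 0x100000, suppose the
 * first reservation lands at 0x00263000, so offset = 0x63000. We then
 * reserve size + alignment - offset = 0x19D000 bytes; if that block happens
 * to start at the same offset within its megabyte, then
 * reserve + alignment - offset is megabyte-aligned and exactly size bytes
 * remain inside the reservation. Failing that, the final reservation of
 * size + alignment = 0x200000 bytes must straddle an aligned 0x100000-byte
 * region no matter where it starts.
 */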

static void
UnmapPages(void *p, size_t size)
{
    if (VirtualFree(p, 0, MEM_RELEASE))
        return;

    /* We could have used the over-allocation, so p may not be the base. */
    JS_ASSERT(GetLastError() == ERROR_INVALID_PARAMETER);
    MEMORY_BASIC_INFORMATION info;
    VirtualQuery(p, &info, sizeof(info));

    UnmapPagesAtBase(info.AllocationBase);
}
# else /* WINCE */

static void *
MapPages(void *addr, size_t size)
{
    void *p = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
                           PAGE_READWRITE);
    JS_ASSERT_IF(p && addr, p == addr);
    return p;
}

static void
UnmapPages(void *addr, size_t size)
{
    JS_ALWAYS_TRUE(VirtualFree(addr, 0, MEM_RELEASE));
}

# endif /* !WINCE */

#elif defined(XP_OS2)

#define JS_GC_HAS_MAP_ALIGN 1
#define OS2_MAX_RECURSIONS  16

static void
UnmapPages(void *addr, size_t size)
{
    if (!DosFreeMem(addr))
        return;

    /* if DosFreeMem() failed, 'addr' is probably part of an "expensive"
     * allocation, so calculate the base address and try again
     */
    unsigned long cb = 2 * size;
    unsigned long flags;
    if (DosQueryMem(addr, &cb, &flags) || cb < size)
        return;

    jsuword base = reinterpret_cast<jsuword>(addr) - ((2 * size) - cb);
    DosFreeMem(reinterpret_cast<void*>(base));
}

static void *
MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
{
    if (++recursions >= OS2_MAX_RECURSIONS)
        return NULL;

    /* OBJ_ANY requests high memory; if that fails, retry without it. */
    void *tmp;
    if (DosAllocMem(&tmp, size,
                    OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
        JS_ALWAYS_TRUE(DosAllocMem(&tmp, size,
                                   PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
    }
    size_t offset = reinterpret_cast<jsuword>(tmp) & (alignment - 1);
    if (!offset)
        return tmp;

    /* if there are 'filler' bytes of free space above 'tmp', free 'tmp',
     * then reallocate it as a 'filler'-sized block; assuming we're not
     * in a race with another thread, the next recursion should succeed
     */
    size_t filler = size + alignment - offset;
    unsigned long cb = filler;
    unsigned long flags = 0;
    unsigned long rc = DosQueryMem(&(static_cast<char*>(tmp))[size],
                                   &cb, &flags);
    if (!rc && (flags & PAG_FREE) && cb >= filler) {
        UnmapPages(tmp, 0);
        if (DosAllocMem(&tmp, filler,
                        OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
            JS_ALWAYS_TRUE(DosAllocMem(&tmp, filler,
                                       PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
        }
    }

    void *p = MapAlignedPagesRecursively(size, alignment, recursions);

    /* size 0 is fine here: tmp is always an allocation base, so
     * UnmapPages() never needs the size to find the base address */
    UnmapPages(tmp, 0);

    return p;
}
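
/*
 * Illustrative trace with hypothetical addresses: asking for size = 0x10000
 * aligned on 0x10000 might first get tmp = 0x2000C000, so offset = 0xC000
 * and filler = 0x14000. If enough space above tmp is free, tmp is released
 * and (absent a race) immediately re-allocated as a 0x14000-byte filler
 * that again starts at 0x2000C000 and ends at the aligned boundary
 * 0x20020000, which the next recursion level is then likely to receive.
 * Every filler block is released on the way back out of the recursion.
 */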

static void *
MapAlignedPages(size_t size, size_t alignment)
{
    int recursions = -1;

    /* make up to OS2_MAX_RECURSIONS attempts to get an aligned block
     * of the right size by recursively allocating blocks of unaligned
     * free memory until only an aligned allocation is possible
     */
    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
    if (p)
        return p;

    /* if memory is heavily fragmented, the recursive strategy may fail;
     * instead, use the "expensive" strategy: allocate twice as much
     * as requested and return an aligned address within this block
     */
    if (DosAllocMem(&p, 2 * size,
                    OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
        JS_ALWAYS_TRUE(DosAllocMem(&p, 2 * size,
                                   PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
    }

    jsuword addr = reinterpret_cast<jsuword>(p);
    addr = (addr + (alignment - 1)) & ~(alignment - 1);

    return reinterpret_cast<void *>(addr);
}
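
/*
 * Rounding sketch for the "expensive" path (hypothetical addresses): with
 * alignment = 0x10000 and p = 0x20005000, addr + 0xFFFF = 0x20014FFF, and
 * masking with ~0xFFFF yields 0x20010000, the first aligned address at or
 * above p. Because the block is 2 * size bytes and alignment <= size, the
 * aligned address plus size never runs past the end of the block; the
 * matching UnmapPages() later recovers the true base via DosQueryMem().
 */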

#elif defined(XP_MACOSX) || defined(DARWIN)

static void *
MapPages(void *addr, size_t size)
{
    vm_address_t p = 0;
    int flags;
    if (addr) {
        /* flags 0 (VM_FLAGS_FIXED) asks for exactly this address. */
        p = (vm_address_t) addr;
        flags = 0;
    } else {
        flags = VM_FLAGS_ANYWHERE;
    }

    kern_return_t err = vm_allocate((vm_map_t) mach_task_self(),
                                    &p, (vm_size_t) size, flags);
    if (err != KERN_SUCCESS)
        return NULL;

    JS_ASSERT_IF(addr, p == (vm_address_t) addr);
    return (void *) p;
}

static void
UnmapPages(void *addr, size_t size)
{
    JS_ALWAYS_TRUE(vm_deallocate((vm_map_t) mach_task_self(),
                                 (vm_address_t) addr,
                                 (vm_size_t) size) == KERN_SUCCESS);
}

#elif defined(XP_UNIX) || defined(XP_BEOS)

/* Required on Solaris 10. Might improve performance elsewhere. */
# if defined(SOLARIS) && defined(MAP_ALIGN)
#  define JS_GC_HAS_MAP_ALIGN

static void *
MapAlignedPages(size_t size, size_t alignment)
{
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     *
     * With MAP_ALIGN, the desired alignment is passed in place of the
     * address hint and the kernel chooses a suitably aligned address.
     */
#  ifdef SOLARIS
    void *p = mmap((caddr_t) alignment, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
#  else
    void *p = mmap((void *) alignment, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
#  endif
    if (p == MAP_FAILED)
        return NULL;
    return p;
}

# else /* JS_GC_HAS_MAP_ALIGN */

static void *
MapPages(void *addr, size_t size)
{
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    void *p = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
                   -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    if (addr && p != addr) {
        /* We succeeded in mapping memory, but not in the right place. */
        JS_ALWAYS_TRUE(munmap(p, size) == 0);
        return NULL;
    }
    return p;
}

# endif /* !JS_GC_HAS_MAP_ALIGN */

static void
UnmapPages(void *addr, size_t size)
{
#ifdef SOLARIS
    JS_ALWAYS_TRUE(munmap((caddr_t) addr, size) == 0);
#else
    JS_ALWAYS_TRUE(munmap(addr, size) == 0);
#endif
}

#endif

namespace js {

GCChunkAllocator defaultGCChunkAllocator;

static inline void *
FindChunkStart(void *p)
{
    jsuword addr = reinterpret_cast<jsuword>(p);
    addr = (addr + GC_CHUNK_MASK) & ~GC_CHUNK_MASK;
    return reinterpret_cast<void *>(addr);
}
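
/*
 * FindChunkStart() rounds p up to the next GC_CHUNK_SIZE boundary. A
 * hypothetical illustration: if GC_CHUNK_SIZE were 0x100000 (so
 * GC_CHUNK_MASK == 0xFFFFF) and p == 0x20005000, the result would be
 * 0x20100000. For any p returned by a 2 * GC_CHUNK_SIZE mapping,
 * FindChunkStart(p) + GC_CHUNK_SIZE <= p + 2 * GC_CHUNK_SIZE, so the
 * aligned chunk always fits inside the over-sized region that
 * AllocGCChunk() below unmaps and re-maps.
 */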

JS_FRIEND_API(void *)
AllocGCChunk()
{
    void *p;

#ifdef JS_GC_HAS_MAP_ALIGN
    p = MapAlignedPages(GC_CHUNK_SIZE, GC_CHUNK_SIZE);
    if (!p)
        return NULL;
#else
    /*
     * Windows requires that there be a 1:1 mapping between VM allocation
     * and deallocation operations. Therefore, take care here to acquire the
     * final result via one mapping operation. This means unmapping any
     * preliminary result that is not correctly aligned.
     */
    p = MapPages(NULL, GC_CHUNK_SIZE);
    if (!p)
        return NULL;

    if (reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK) {
        UnmapPages(p, GC_CHUNK_SIZE);
        p = MapPages(FindChunkStart(p), GC_CHUNK_SIZE);
        while (!p) {
            /*
             * Over-allocate in order to map a memory region that is
             * definitely large enough, then deallocate it and allocate
             * the correct size again within the over-sized mapping.
             */
            p = MapPages(NULL, GC_CHUNK_SIZE * 2);
            if (!p)
                return NULL;
            UnmapPages(p, GC_CHUNK_SIZE * 2);
            p = MapPages(FindChunkStart(p), GC_CHUNK_SIZE);

            /*
             * Failure here indicates a race with another thread, so
             * try again.
             */
        }
    }
#endif /* !JS_GC_HAS_MAP_ALIGN */

    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK));
    return p;
}

JS_FRIEND_API(void)
FreeGCChunk(void *p)
{
    JS_ASSERT(p);
    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK));
    UnmapPages(p, GC_CHUNK_SIZE);
}

} /* namespace js */
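
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller that wants a naturally aligned chunk would do something like
 *
 *     void *chunk = js::AllocGCChunk();
 *     if (chunk) {
 *         // chunk is GC_CHUNK_SIZE bytes on a GC_CHUNK_SIZE boundary,
 *         // so per-chunk data can be reached from any interior pointer
 *         // by masking it with ~GC_CHUNK_MASK.
 *         js::FreeGCChunk(chunk);
 *     }
 */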