PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
fileno(f), 0);
ASSERT(addr != MAP_FAILED);
- munmap(addr, size);
+ OS::Free(addr, size);
fclose(f);
}
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
+
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
- ASSERT(base <= aligned_base);
+ ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
- size_t bytes_prior = static_cast<size_t>(aligned_base - base);
- if (bytes_prior > 0) {
- munmap(base, bytes_prior);
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
}
- if (static_cast<size_t>(aligned_base - base) < request_size - size) {
- munmap(aligned_base + size, request_size - size - bytes_prior);
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
}
+  ASSERT_EQ(aligned_size, request_size);
+
address_ = static_cast<void*>(aligned_base);
- size_ = size;
+ size_ = aligned_size;
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
+
Address base = static_cast<Address>(reservation);
Address aligned_base = RoundUp(base, alignment);
- ASSERT(base <= aligned_base);
+ ASSERT_LE(base, aligned_base);
// Unmap extra memory reserved before and after the desired block.
- size_t bytes_prior = static_cast<size_t>(aligned_base - base);
- if (bytes_prior > 0) {
- munmap(base, bytes_prior);
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
}
- if (static_cast<size_t>(aligned_base - base) < request_size - size) {
- munmap(aligned_base + size, request_size - size - bytes_prior);
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
}
+  ASSERT_EQ(aligned_size, request_size);
+
address_ = static_cast<void*>(aligned_base);
- size_ = size;
+ size_ = aligned_size;
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
- if (memory_) munmap(memory_, size_);
+ if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ OS::Free(address(), size());
+    address_ = MAP_FAILED;
}
}
if (address == NULL) return;
Address base = RoundUp(static_cast<Address>(address), alignment);
// Try reducing the size by freeing and then reallocating a specific area.
- ReleaseRegion(address, request_size);
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
if (address != NULL) {
request_size = size;
void Release() {
ASSERT(IsReserved());
- // Notice: Order is somportant here. The VirtualMemory object might live
+ // Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
void* address = address_;
size_t size = size_;
Reset();
- ReleaseRegion(address, size);
+ bool result = ReleaseRegion(address, size);
+ USE(result);
+ ASSERT(result);
}
// Assign control of the reserved region to a different VirtualMemory object.
isolate_->code_range()->FreeRawMemory(base, size);
} else {
ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
- VirtualMemory::ReleaseRegion(base, size);
+ bool result = VirtualMemory::ReleaseRegion(base, size);
+ USE(result);
+ ASSERT(result);
}
}
regress/regress-1119: FAIL
##############################################################################
-# NewGC: http://code.google.com/p/v8/issues/detail?id=1701
-array-join: SKIP
# NewGC: BUG(1719) slow to collect arrays over several contexts.
regress/regress-524: SKIP