From: bmeurer@chromium.org
Date: Thu, 25 Jul 2013 09:28:21 +0000 (+0000)
Subject: Drop redundant GetRandomMmapAddr() in platform-openbsd.cc.
X-Git-Tag: upstream/4.7.83~13209
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0da03091282af1447d16a03f104d21cfac3fd9d7;p=platform%2Fupstream%2Fv8.git

Drop redundant GetRandomMmapAddr() in platform-openbsd.cc.

TBR=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/20284002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15874 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 57b6d69..b4a7a4c 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -64,33 +64,6 @@ namespace internal {
 static Mutex* limit_mutex = NULL;
 
 
-static void* GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-#if V8_TARGET_ARCH_X64
-    uint64_t rnd1 = V8::RandomPrivate(isolate);
-    uint64_t rnd2 = V8::RandomPrivate(isolate);
-    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
-    // Currently available CPUs have 48 bits of virtual addressing. Truncate
-    // the hint address to 46 bits to give the kernel a fighting chance of
-    // fulfilling our placement request.
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    uint32_t raw_addr = V8::RandomPrivate(isolate);
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
-    raw_addr &= 0x3ffff000;
-    raw_addr += 0x20000000;
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
 int OS::ActivationFrameAlignment() {
   // With gcc 4.4 the tree vectorization optimizer can generate code
   // that requires 16 byte alignment such as movdqa on x86.
@@ -146,7 +119,7 @@ void* OS::Allocate(const size_t requested,
                    bool is_executable) {
   const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = GetRandomMmapAddr();
+  void* addr = OS::GetRandomMmapAddr();
   void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
   if (mbase == MAP_FAILED) {
     LOG(i::Isolate::Current(),
@@ -341,7 +314,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
   ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
   size_t request_size = RoundUp(size + alignment,
                                 static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(GetRandomMmapAddr(),
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
                            request_size,
                            PROT_NONE,
                            MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
@@ -413,7 +386,7 @@ bool VirtualMemory::Guard(void* address) {
 
 
 void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(GetRandomMmapAddr(),
+  void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
                       PROT_NONE,
                       MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
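
For context, the dropped helper only computes a randomized, page-aligned hint that is passed as the first argument to mmap(2); the kernel may honor the hint or place the mapping elsewhere. The sketch below is a minimal standalone illustration of that hint technique, not the V8 implementation: it assumes a POSIX system with MAP_ANON, substitutes plain random() for V8::RandomPrivate(), and reuses the constants from the 32-bit branch of the removed function. The names RandomMmapHint and the 1 MiB size are hypothetical, chosen only for the example.

// Minimal standalone sketch (not V8 code): compute a randomized, page-aligned
// hint and hand it to mmap(); the kernel may or may not place the mapping there.
#include <sys/mman.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void* RandomMmapHint() {
  // Mirror the 32-bit branch of the removed helper: keep the hint inside the
  // relatively unpopulated 0x20000000 - 0x60000000 range and page-aligned
  // (low 12 bits cleared).
  uintptr_t raw = static_cast<uintptr_t>(random());
  raw &= 0x3ffff000;
  raw += 0x20000000;
  return reinterpret_cast<void*>(raw);
}

int main() {
  const size_t size = 1 << 20;  // reserve 1 MiB
  void* hint = RandomMmapHint();
  // PROT_NONE reservation, as in VirtualMemory::ReserveRegion above.
  void* region = mmap(hint, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) {
    perror("mmap");
    return 1;
  }
  printf("hint=%p mapped=%p\n", hint, region);
  munmap(region, size);
  return 0;
}

Running the sketch a few times should show the mapped address tracking the random hint on most attempts, which is the whole point of passing a hint instead of NULL.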