// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Platform-specific code for QNX goes here. For the POSIX-compatible
// parts the implementation is in platform-posix.cc.
#include <pthread.h>    // pthread_t
#include <semaphore.h>
#include <stdlib.h>     // malloc & free
#include <sys/resource.h>
#include <sys/types.h>

// QNX requires memory pages to be marked as executable.
// Otherwise, the OS raises an exception when executing code in that page.
#include <devctl.h>     // devctl
#include <fcntl.h>      // open
#include <stdio.h>      // snprintf
#include <strings.h>    // index
#include <sys/mman.h>   // mmap & munmap
#include <sys/procfs.h>
#include <sys/stat.h>   // open
#include <unistd.h>     // sysconf

#include <cmath>        // std::floor & std::isnan

#include "src/base/macros.h"
#include "src/base/platform/platform.h"
// 0 is never a valid thread id on QNX since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;


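// Reports whether this binary targets the ARM hard-float ABI. The checks
// below rely entirely on compiler-provided macros; e.g. GCC 4.7.2 yields
// GCC_VERSION = 4 * 10000 + 7 * 100 + 2 = 40702, so it takes the
// GCC_VERSION >= 40600 branch and the answer depends only on __ARM_PCS_VFP.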
bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // the FP ABI this code was compiled for.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION (__GNUC__ * 10000 \
                     + __GNUC_MINOR__ * 100 \
                     + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
    !defined(__VFP_FP__)
  return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for. " \
       "Please report it on this issue: " \
       "http://code.google.com/p/v8/issues/detail?id=2140"
#endif
#endif
#undef GCC_VERSION
}


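// Returns the abbreviated name of the current local timezone (taken from the
// tm_zone field that localtime() fills in on QNX), or "" on failure.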
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
  struct tm* t = localtime(&tv);  // NOLINT(runtime/threadsafe_fn)
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset(TimezoneCache* cache) {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);  // NOLINT(runtime/threadsafe_fn)
  // tm_gmtoff includes any daylight savings offset, so subtract it.
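  // For example, in US Eastern time while DST is in effect, tm_gmtoff is
  // -14400 and tm_isdst > 0, so the result is (-14400 - 3600) * 1000 =
  // -18000000 ms, i.e. the standard-time offset of UTC-5.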
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


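// Maps a fresh anonymous region of |requested| bytes, rounded up to the
// allocation alignment, at a randomized hint address; the rounded size is
// reported back through |allocated|.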
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) return NULL;
  *allocated = msize;
  return mbase;
}


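// Enumerates the ELF objects mapped into this process by querying the QNX
// procfs entry /proc/<pid>/as with devctl(): DCMD_PROC_MAPINFO for the entry
// count, DCMD_PROC_PAGEDATA for the map entries, and DCMD_PROC_MAPDEBUG for
// the path of each ELF mapping.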
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  procfs_mapinfo *mapinfos = NULL, *mapinfo;
  int proc_fd, num, i;

  struct {
    procfs_debuginfo info;
    char buff[PATH_MAX];
  } map;

  char buf[PATH_MAX + 1];
  snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());

  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
    return result;
  }

  /* Get the number of map entries. */
  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
    close(proc_fd);
    return result;
  }

  mapinfos = reinterpret_cast<procfs_mapinfo *>(
      malloc(num * sizeof(procfs_mapinfo)));
  if (mapinfos == NULL) {
    close(proc_fd);
    return result;
  }

  /* Fill the map entries. */
  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
             mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
    free(mapinfos);
    close(proc_fd);
    return result;
  }

  for (i = 0; i < num; i++) {
    mapinfo = mapinfos + i;
    if (mapinfo->flags & MAP_ELF) {
      map.info.vaddr = mapinfo->vaddr;
      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
        continue;
      }
      result.push_back(SharedLibraryAddress(
          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
    }
  }
  free(mapinfos);
  close(proc_fd);
  return result;
}


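// Used on some platforms to notify profilers that a moving GC happened;
// there is nothing to do on QNX.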
void OS::SignalCodeMovingGC() {
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


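// VirtualMemory separates reserving address space from committing memory:
// reservations are made with PROT_NONE and MAP_LAZY so no backing store is
// allocated, and ranges are later committed with read/write (and optionally
// execute) protection via a MAP_FIXED remap. A rough usage sketch (names as
// used in this file; sizes are illustrative only):
//
//   VirtualMemory vm(256 * 1024);                 // reserve only
//   vm.Commit(vm.address(), 256 * 1024, false);   // commit before use
//   vm.Uncommit(vm.address(), 256 * 1024);        // back to reserved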
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  DCHECK((alignment % OS::AllocateAlignment()) == 0);
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  uint8_t* base = static_cast<uint8_t*>(reservation);
  uint8_t* aligned_base = RoundUp(base, alignment);
  DCHECK_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  DCHECK_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  DCHECK(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    DCHECK(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


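// Reserves |size| bytes of address space at a randomized hint address. The
// pages are PROT_NONE and MAP_LAZY, so they consume no backing store until
// CommitRegion() makes them accessible.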
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


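// Commits a previously reserved range by remapping it in place (MAP_FIXED)
// with read/write access, plus execute access if |is_executable| is set.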
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  return true;
}


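// Returns a committed range to the reserved state: the MAP_FIXED remap with
// PROT_NONE and MAP_LAZY drops the backing store while keeping the address
// range reserved.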
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  return false;
}