1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // Platform-specific code for POSIX goes here. This is not a platform on its
6 // own, but contains the parts which are the same across the POSIX platforms
7 // Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
11 #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
12 #include <pthread_np.h> // for pthread_set_name_np
14 #include <sched.h> // for sched_yield
20 #include <sys/socket.h>
21 #include <sys/resource.h>
23 #include <sys/types.h>
25 #if defined(__linux__)
26 #include <sys/prctl.h> // for prctl
28 #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
29 defined(__NetBSD__) || defined(__OpenBSD__)
30 #include <sys/sysctl.h> // for sysctl
33 #include <arpa/inet.h>
34 #include <netinet/in.h>
39 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
41 #include <android/log.h>
47 #include "isolate-inl.h"
// Sentinel value used by Thread::PlatformData to mark "no thread created yet".
53 // 0 is never a valid thread id.
54 static const pthread_t kNoThread = (pthread_t) 0;
// CPU features guaranteed present by the platform ABI, as a bitmask.
// NOTE(review): the platform #if/#else/#endif selectors are elided in this
// listing; the SSE2/CMOV branch is the Mac OS X case, the final return
// covers all other systems.
57 uint64_t OS::CpuFeaturesImpliedByPlatform() {
59 // Mac OS X requires all these to install so we can assume they are present.
60 // These constants are defined by the CPUid instructions.
61 const uint64_t one = 1;
62 return (one << SSE2) | (one << CMOV);
64 return 0; // Nothing special about the other systems.
// Returns the soft RLIMIT_DATA cap on virtual memory; 0 means "no limit"
// (unlimited, or the limit could not be determined).
69 // Maximum size of the virtual memory. 0 means there is no artificial
72 intptr_t OS::MaxVirtualMemory() {
// NOTE(review): the declaration of `limit` (struct rlimit) is elided here.
74 int result = getrlimit(RLIMIT_DATA, &limit);
75 if (result != 0) return 0;
// NaCl-toolchain workaround: compare against -1 via int cast instead of
// using the RLIM_INFINITY constant directly (elided #if selects between
// the two checks).
77 // The NaCl compiler doesn't like resource.h constants.
78 if (static_cast<int>(limit.rlim_cur) == -1) return 0;
80 if (limit.rlim_cur == RLIM_INFINITY) return 0;
82 return limit.rlim_cur;
// Returns total physical RAM in bytes, one branch per platform family.
// NOTE(review): the #if/#elif platform selectors and error-path bodies are
// elided in this listing; branches below are identified by the APIs called.
86 uint64_t OS::TotalPhysicalMemory() {
// Darwin-style branch: two-element sysctl mib (presumably {CTL_HW,
// HW_MEMSIZE}; the mib and `size` declarations are elided — confirm upstream).
92 size_t len = sizeof(size);
93 if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
97 return static_cast<uint64_t>(size);
// FreeBSD-style branch: page count * page size via sysctlbyname.
100 size_t size = sizeof(pages);
101 sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
102 sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
103 if (pages == -1 || page_size == -1) {
107 return static_cast<uint64_t>(pages) * page_size;
// Win32-API branch (presumably Cygwin): GlobalMemoryStatus.
109 MEMORYSTATUS memory_info;
110 memory_info.dwLength = sizeof(memory_info);
111 if (!GlobalMemoryStatus(&memory_info)) {
115 return static_cast<uint64_t>(memory_info.dwTotalPhys);
// Branch using stat() on /proc (presumably QNX, where st_size of /proc
// reports total memory — TODO confirm against upstream).
117 struct stat stat_buf;
118 if (stat("/proc", &stat_buf) != 0) {
122 return static_cast<uint64_t>(stat_buf.st_size);
// Default POSIX branch: sysconf page count * page size.
124 intptr_t pages = sysconf(_SC_PHYS_PAGES);
125 intptr_t page_size = sysconf(_SC_PAGESIZE);
126 if (pages == -1 || page_size == -1) {
130 return static_cast<uint64_t>(pages) * page_size;
// Required stack-frame alignment (in bytes) for the target architecture.
// NOTE(review): the per-architecture return statements are elided in this
// listing; only the selectors and explanatory comments remain.
135 int OS::ActivationFrameAlignment() {
136 #if V8_TARGET_ARCH_ARM
137 // On EABI ARM targets this is required for fp correctness in the
140 #elif V8_TARGET_ARCH_MIPS
143 // Otherwise we just assume 16 byte alignment, i.e.:
144 // - With gcc 4.4 the tree vectorization optimizer can generate code
145 // that requires 16 byte alignment such as movdqa on x86.
146 // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
147 // see "Mac OS X ABI Function Call Guide"
// Page granularity for commit operations; getpagesize() result is cached in
// a function-local static on first call (return statement elided here).
153 intptr_t OS::CommitPageSize() {
154 static intptr_t page_size = getpagesize();
// Releases a mapping previously created with mmap.
159 void OS::Free(void* address, const size_t size) {
160 // TODO(1240712): munmap has a return value which is ignored here.
161 int result = munmap(address, size);
167 // Get rid of writable permission on code allocations.
168 void OS::ProtectCode(void* address, const size_t size) {
// Win32-API branch (presumably Cygwin; #if selectors elided in this listing).
171 VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
173 // The Native Client port of V8 uses an interpreter, so
174 // code pages don't need PROT_EXEC.
175 mprotect(address, size, PROT_READ);
// All other POSIX platforms: readable + executable, never writable.
177 mprotect(address, size, PROT_READ | PROT_EXEC);
182 // Create guard pages.
183 void OS::Guard(void* address, const size_t size) {
// Same platform split: PAGE_NOACCESS via VirtualProtect vs PROT_NONE via
// mprotect — both make the range inaccessible so stray accesses fault.
186 VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
188 mprotect(address, size, PROT_NONE);
// Produces a randomized hint address for mmap so allocations land in
// predictable-but-randomized regions. The NaCl and sanitizer cases (elided
// early returns) opt out of custom hints entirely.
193 void* OS::GetRandomMmapAddr() {
195 // TODO(bradchen): restore randomization once Native Client gets
196 // smarter about using mmap address hints.
197 // See http://code.google.com/p/nativeclient/issues/3341
200 #if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
201 defined(THREAD_SANITIZER)
202 // Dynamic tools do not support custom mmap addresses.
205 Isolate* isolate = Isolate::UncheckedCurrent();
206 // Note that the current isolate isn't set up in a call path via
207 // CpuFeatures::Probe. We don't care about randomization in this case because
208 // the code page is immediately freed.
209 if (isolate != NULL) {
// NOTE(review): `raw_addr`'s declaration is elided in this listing.
211 isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
212 #if V8_TARGET_ARCH_X64
213 // Currently available CPUs have 48 bits of virtual addressing. Truncate
214 // the hint address to 46 bits to give the kernel a fighting chance of
215 // fulfilling our placement request.
216 raw_addr &= V8_UINT64_C(0x3ffffffff000)
218 raw_addr &= 0x3ffff000;
// Solaris/illumos adjustment (selector elided): push the hint into the
// third quarter of the 32-bit address space.
221 // For our Solaris/illumos mmap hint, we pick a random address in the bottom
222 // half of the top half of the address space (that is, the third quarter).
223 // Because we do not MAP_FIXED, this will be treated only as a hint -- the
224 // system will not fail to mmap() because something else happens to already
225 // be mapped at our random address. We deliberately set the hint high enough
226 // to get well above the system's break (that is, the heap); Solaris and
227 // illumos will try the hint and if that fails allocate as if there were
228 // no hint at all. The high hint prevents the break from getting hemmed in
229 // at low values, ceding half of the address space to the system heap.
230 raw_addr += 0x80000000;
// Other 32-bit targets: bias into the sparsely-populated 0x20000000+ range.
232 // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
233 // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
235 raw_addr += 0x20000000;
238 return reinterpret_cast<void*>(raw_addr);
// Alignment used for OS::Allocate — one system page.
244 size_t OS::AllocateAlignment() {
245 return static_cast<size_t>(sysconf(_SC_PAGESIZE));
// Sleeps for the given number of milliseconds (the conversion to
// microseconds and the usleep-style call are elided in this listing).
249 void OS::Sleep(int milliseconds) {
250 useconds_t ms = static_cast<useconds_t>(milliseconds);
// NOTE(review): the lines below belong to OS::Abort(), whose signature line
// is elided in this listing. --hard-abort crashes immediately; otherwise
// fall through to std::abort (call elided).
256 if (FLAG_hard_abort) {
257 V8_IMMEDIATE_CRASH();
259 // Redirect to std abort to signal abnormal program termination.
// Emits an architecture-specific breakpoint trap.
// NOTE(review): the inline-asm bodies for each architecture are elided in
// this listing; only the arch selectors remain.
264 void OS::DebugBreak() {
267 #elif V8_HOST_ARCH_ARM64
269 #elif V8_HOST_ARCH_MIPS
271 #elif V8_HOST_ARCH_IA32
272 #if defined(__native_client__)
276 #endif // __native_client__
277 #elif V8_HOST_ARCH_X64
280 #error Unsupported host architecture.
285 // ----------------------------------------------------------------------------
// Math support.
// Floating-point modulo with C++ fmod semantics (sign follows x).
288 double modulo(double x, double y) {
289 return std::fmod(x, y);
// Declares a function pointer plus init/call wrappers for a runtime-generated
// fast math stub. The pointer starts NULL and is installed by
// init_fast_<name>_function(); fast_<name>() must not be called before that.
293 #define UNARY_MATH_FUNCTION(name, generator) \
294 static UnaryMathFunction fast_##name##_function = NULL; \
295 void init_fast_##name##_function() { \
296 fast_##name##_function = generator; \
298 double fast_##name(double x) { \
299 return (*fast_##name##_function)(x); \
302 UNARY_MATH_FUNCTION(exp, CreateExpFunction())
303 UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
305 #undef UNARY_MATH_FUNCTION
// fast_exp is installed on first use rather than eagerly in PostSetUp.
// NOTE(review): the NULL check here is not thread-safe by itself; presumably
// callers serialize initialization — confirm at call sites.
308 void lazily_initialize_fast_exp() {
309 if (fast_exp_function == NULL) {
310 init_fast_exp_function();
// Returns a quiet NaN (the actual return expression is elided in this
// listing; per the comment it uses something other than POSIX's NAN).
315 double OS::nan_value() {
316 // NAN from math.h is defined in C99 and not in POSIX.
// Current process id as a plain int.
321 int OS::GetCurrentProcessId() {
322 return static_cast<int>(getpid());
326 // ----------------------------------------------------------------------------
327 // POSIX date/time support.
// User-mode CPU time consumed by this process; returns -1 on getrusage
// failure (the `usage` declaration and success return are elided).
330 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
333 if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
334 *secs = usage.ru_utime.tv_sec;
335 *usecs = usage.ru_utime.tv_usec;
// Wall-clock time in JavaScript's milliseconds-since-epoch representation.
340 double OS::TimeCurrentMillis() {
341 return Time::Now().ToJsTime();
// POSIX needs no per-isolate timezone state, so the cache type is empty and
// the lifecycle functions merely assert the pointer stays NULL.
345 class TimezoneCache {};
348 TimezoneCache* OS::CreateTimezoneCache() {
353 void OS::DisposeTimezoneCache(TimezoneCache* cache) {
354 ASSERT(cache == NULL);
358 void OS::ClearTimezoneCache(TimezoneCache* cache) {
359 ASSERT(cache == NULL);
// DST offset in milliseconds for the given JS time value, via localtime().
// NOTE(review): localtime() is not thread-safe; localtime_r would avoid
// sharing the static tm buffer — confirm whether callers are serialized.
363 double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
364 if (std::isnan(time)) return nan_value();
365 time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
366 struct tm* t = localtime(&tv);
367 if (NULL == t) return nan_value();
368 return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
// Presumably returns errno (body elided in this listing).
372 int OS::GetLastError() {
377 // ----------------------------------------------------------------------------
378 // POSIX stdio support.
// Opens a file and rejects anything that is not a regular file (directories,
// devices, ...), returning NULL in that case.
// NOTE(review): on the failure paths after fopen() succeeds (fstat error,
// non-regular file) the FILE* is returned NULL without an fclose(), which
// leaks the handle — confirm against upstream before relying on it.
381 FILE* OS::FOpen(const char* path, const char* mode) {
382 FILE* file = fopen(path, mode);
383 if (file == NULL) return NULL;
384 struct stat file_stat;
385 if (fstat(fileno(file), &file_stat) != 0) return NULL;
386 bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
387 if (is_regular_file) return file;
// Deletes a file or empty directory; true on success.
393 bool OS::Remove(const char* path) {
394 return (remove(path) == 0);
// Presumably wraps tmpfile(); the body is elided in this listing.
398 FILE* OS::OpenTemporaryFile() {
// Log files are opened for (truncating) write.
403 const char* const OS::LogFileOpenMode = "w";
// printf-style logging entry points. Each varargs function forwards to its
// V* counterpart; the va_list declarations and va_end calls are elided in
// this listing. On Android (without V8_ANDROID_LOG_STDOUT) output is routed
// to the system log instead of stdio.
406 void OS::Print(const char* format, ...) {
408 va_start(args, format);
409 VPrint(format, args);
414 void OS::VPrint(const char* format, va_list args) {
415 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
416 __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
418 vprintf(format, args);
423 void OS::FPrint(FILE* out, const char* format, ...) {
425 va_start(args, format);
426 VFPrint(out, format, args);
431 void OS::VFPrint(FILE* out, const char* format, va_list args) {
432 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
433 __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
435 vfprintf(out, format, args);
440 void OS::PrintError(const char* format, ...) {
442 va_start(args, format);
443 VPrintError(format, args);
// Error output goes to stderr (or the Android error log level).
448 void OS::VPrintError(const char* format, va_list args) {
449 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
450 __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
452 vfprintf(stderr, format, args);
// snprintf into a Vector buffer; forwards to VSNPrintF.
457 int OS::SNPrintF(Vector<char> str, const char* format, ...) {
459 va_start(args, format);
460 int result = VSNPrintF(str, format, args);
// vsnprintf wrapper: on error or truncation the buffer is explicitly
// NUL-terminated (when non-empty); the return values are elided here, but
// the truncation branch presumably returns -1 — confirm upstream.
466 int OS::VSNPrintF(Vector<char> str,
469 int n = vsnprintf(str.start(), str.length(), format, args);
470 if (n < 0 || n >= str.length()) {
471 // If the length is zero, the assignment fails.
472 if (str.length() > 0)
473 str[str.length() - 1] = '\0';
// Architecture-specific memory-copy machinery. Each arch starts with a plain
// C wrapper so the function pointers are usable during early startup, then
// PostSetUp() swaps in runtime-generated stubs where available.
481 #if V8_TARGET_ARCH_IA32
482 static void MemMoveWrapper(void* dest, const void* src, size_t size) {
483 memmove(dest, src, size);
487 // Initialize to library version so we can call this at any time during startup.
488 static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
490 // Defined in codegen-ia32.cc.
491 OS::MemMoveFunction CreateMemMoveFunction();
493 // Copy memory area. No restrictions.
494 void OS::MemMove(void* dest, const void* src, size_t size) {
495 if (size == 0) return;
496 // Note: here we rely on dependent reads being ordered. This is true
497 // on all architectures we currently support.
498 (*memmove_function)(dest, src, size);
// ARM: widening copy from uint8 to uint16, element by element.
501 #elif defined(V8_HOST_ARCH_ARM)
502 void OS::MemCopyUint16Uint8Wrapper(uint16_t* dest,
505 uint16_t *limit = dest + chars;
506 while (dest < limit) {
507 *dest++ = static_cast<uint16_t>(*src++);
512 OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
513 OS::MemCopyUint16Uint8Function OS::memcopy_uint16_uint8_function =
514 &OS::MemCopyUint16Uint8Wrapper;
515 // Defined in codegen-arm.cc.
516 OS::MemCopyUint8Function CreateMemCopyUint8Function(
517 OS::MemCopyUint8Function stub);
518 OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
519 OS::MemCopyUint16Uint8Function stub);
// MIPS: only the uint8 copy has a generated variant.
521 #elif defined(V8_HOST_ARCH_MIPS)
522 OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
523 // Defined in codegen-mips.cc.
524 OS::MemCopyUint8Function CreateMemCopyUint8Function(
525 OS::MemCopyUint8Function stub);
// Installs the runtime-generated copy stubs declared above (once code
// generation is available) and eagerly initializes fast_sqrt.
529 void OS::PostSetUp() {
530 #if V8_TARGET_ARCH_IA32
// Keep the library fallback if stub generation returned NULL.
531 OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
532 if (generated_memmove != NULL) {
533 memmove_function = generated_memmove;
535 #elif defined(V8_HOST_ARCH_ARM)
536 OS::memcopy_uint8_function =
537 CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
538 OS::memcopy_uint16_uint8_function =
539 CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
540 #elif defined(V8_HOST_ARCH_MIPS)
541 OS::memcopy_uint8_function =
542 CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
544 // fast_exp is initialized lazily.
545 init_fast_sqrt_function();
549 // ----------------------------------------------------------------------------
550 // POSIX string support.
// Thin wrapper over strchr.
553 char* OS::StrChr(char* str, int c) {
554 return strchr(str, c);
// strncpy into a Vector buffer.
// NOTE(review): like strncpy itself, this does not NUL-terminate when
// strlen(src) >= n — callers are presumably responsible for termination.
558 void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
559 strncpy(dest.start(), src, n);
563 // ----------------------------------------------------------------------------
564 // POSIX thread support.
567 class Thread::PlatformData : public Malloced {
569 PlatformData() : thread_(kNoThread) {}
570 pthread_t thread_; // Thread handle for pthread.
571 // Synchronizes thread creation
572 Mutex thread_creation_mutex_;
// Constructs an unstarted thread; clamps a too-small requested stack size up
// to the pthread minimum so pthread_attr_setstacksize cannot fail on it.
575 Thread::Thread(const Options& options)
576 : data_(new PlatformData),
577 stack_size_(options.stack_size()),
578 start_semaphore_(NULL) {
579 if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) {
580 stack_size_ = PTHREAD_STACK_MIN;
582 set_name(options.name());
// Sets the current thread's name for debuggers/profilers, using whichever
// API this OS provides. NOTE(review): some branch selectors (#elif for the
// NetBSD and Mac cases) are elided in this listing.
591 static void SetThreadName(const char* name) {
592 #if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
593 pthread_set_name_np(pthread_self(), name);
// NetBSD-style branch: pthread_setname_np with a format string.
595 STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
596 pthread_setname_np(pthread_self(), "%s", name);
598 // pthread_setname_np is only available in 10.6 or later, so test
599 // for it at runtime.
600 int (*dynamic_pthread_setname_np)(const char*);
601 *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
602 dlsym(RTLD_DEFAULT, "pthread_setname_np");
603 if (dynamic_pthread_setname_np == NULL)
606 // Mac OS X does not expose the length limit of the name, so hardcode it.
607 static const int kMaxNameLength = 63;
608 STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
609 dynamic_pthread_setname_np(name);
// Linux fallback: prctl(PR_SET_NAME, ...) (the prctl call line is elided).
610 #elif defined(PR_SET_NAME)
612 reinterpret_cast<unsigned long>(name), // NOLINT
// pthread entry trampoline: recovers the Thread*, synchronizes with the
// creating thread, then runs the user code.
618 static void* ThreadEntry(void* arg) {
619 Thread* thread = reinterpret_cast<Thread*>(arg);
620 // We take the lock here to make sure that pthread_create finished first since
621 // we don't know which thread will run first (the original thread or the new
// Lock-and-release only: by the time we acquire the mutex, Thread::Start has
// finished storing data_->thread_.
623 { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
624 SetThreadName(thread->name());
625 ASSERT(thread->data()->thread_ != kNoThread);
626 thread->NotifyStartedAndRun();
// Copies the name into the fixed-size name_ buffer, guaranteeing NUL
// termination even when the source is too long for strncpy to terminate.
631 void Thread::set_name(const char* name) {
632 strncpy(name_, name, sizeof(name_));
633 name_[sizeof(name_) - 1] = '\0';
// Creates the pthread, holding thread_creation_mutex_ across pthread_create
// so ThreadEntry cannot observe data_->thread_ before it is stored.
// NOTE(review): declarations of `result` and `attr`, and the closing of the
// lock scope, are elided in this listing.
637 void Thread::Start() {
640 memset(&attr, 0, sizeof(attr));
641 result = pthread_attr_init(&attr);
642 ASSERT_EQ(0, result);
643 // Native client uses default stack size.
645 if (stack_size_ > 0) {
646 result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
647 ASSERT_EQ(0, result);
651 LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
652 result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
654 ASSERT_EQ(0, result);
655 result = pthread_attr_destroy(&attr);
656 ASSERT_EQ(0, result);
657 ASSERT(data_->thread_ != kNoThread);
// Blocks until the thread exits; the join result is ignored.
662 void Thread::Join() {
663 pthread_join(data_->thread_, NULL);
// Politely cede the CPU; sched_yield only fails on misconfigured systems.
667 void Thread::YieldCPU() {
668 int result = sched_yield();
669 ASSERT_EQ(0, result);
// Converts a pthread_key_t to V8's integer LocalStorageKey. The Cygwin
// branch (selector elided) goes through intptr_t because pthread_key_t is a
// pointer type there; elsewhere a plain static_cast suffices.
674 static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
676 // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
677 // because pthread_key_t is a pointer type on Cygwin. This will probably not
678 // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
679 STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
680 intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
681 return static_cast<Thread::LocalStorageKey>(ptr_key);
683 return static_cast<Thread::LocalStorageKey>(pthread_key);
// Inverse of PthreadKeyToLocalKey, with the same Cygwin special case.
688 static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
690 STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
691 intptr_t ptr_key = static_cast<intptr_t>(local_key);
692 return reinterpret_cast<pthread_key_t>(ptr_key);
694 return static_cast<pthread_key_t>(local_key);
// Fast TLS support (Mac): V8's generated code reads thread-local slots
// directly at a kernel-version-dependent offset from the TLS base.
699 #ifdef V8_FAST_TLS_SUPPORTED
701 static Atomic32 tls_base_offset_initialized = 0;
702 intptr_t kMacTlsBaseOffset = 0;
704 // It's safe to do the initialization more that once, but it has to be
705 // done at least once.
706 static void InitializeTlsBaseOffset() {
// Query the kernel release string (e.g. "12.3.0") to decide which XNU
// pthread layout is in use.
707 const size_t kBufferSize = 128;
708 char buffer[kBufferSize];
709 size_t buffer_size = kBufferSize;
710 int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
711 if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
712 V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
714 // The buffer now contains a string of the form XX.YY.ZZ, where
715 // XX is the major kernel version component.
716 // Make sure the buffer is 0-terminated.
717 buffer[kBufferSize - 1] = '\0';
// NOTE(review): period_pos is computed but its use (terminating the major
// component) is elided in this listing.
718 char* period_pos = strchr(buffer, '.');
720 int kernel_version_major =
721 static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT
722 // The constants below are taken from pthreads.s from the XNU kernel
723 // sources archive at www.opensource.apple.com.
724 if (kernel_version_major < 11) {
725 // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
727 #if V8_HOST_ARCH_IA32
728 kMacTlsBaseOffset = 0x48;
730 kMacTlsBaseOffset = 0x60;
733 // 11.x.x (Lion) changed the offset.
734 kMacTlsBaseOffset = 0;
// Publish the offset before flipping the initialized flag.
737 Release_Store(&tls_base_offset_initialized, 1);
// Round-trip a sentinel through the fast TLS path to verify the offset
// actually works on this kernel; abort hard if it does not.
741 static void CheckFastTls(Thread::LocalStorageKey key) {
742 void* expected = reinterpret_cast<void*>(0x1234CAFE);
743 Thread::SetThreadLocal(key, expected);
744 void* actual = Thread::GetExistingThreadLocal(key);
745 if (expected != actual) {
746 V8_Fatal(__FILE__, __LINE__,
747 "V8 failed to initialize fast TLS on current kernel");
749 Thread::SetThreadLocal(key, NULL);
752 #endif // V8_FAST_TLS_SUPPORTED
// Creates a new TLS key. On Mac fast-TLS builds, the first creation also
// initializes and sanity-checks the TLS base offset.
// NOTE(review): the `key` declaration and final return are elided here.
755 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
756 #ifdef V8_FAST_TLS_SUPPORTED
757 bool check_fast_tls = false;
758 if (tls_base_offset_initialized == 0) {
759 check_fast_tls = true;
760 InitializeTlsBaseOffset();
764 int result = pthread_key_create(&key, NULL);
765 ASSERT_EQ(0, result);
767 LocalStorageKey local_key = PthreadKeyToLocalKey(key);
768 #ifdef V8_FAST_TLS_SUPPORTED
769 // If we just initialized fast TLS support, make sure it works.
770 if (check_fast_tls) CheckFastTls(local_key);
// Destroys a TLS key; per-thread values are not cleaned up by pthreads.
776 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
777 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
778 int result = pthread_key_delete(pthread_key);
779 ASSERT_EQ(0, result);
// Reads the calling thread's value for the key (NULL if never set).
784 void* Thread::GetThreadLocal(LocalStorageKey key) {
785 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
786 return pthread_getspecific(pthread_key);
// Stores a value in the calling thread's slot for the key.
790 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
791 pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
792 int result = pthread_setspecific(pthread_key, value);
793 ASSERT_EQ(0, result);
798 } } // namespace v8::internal