1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 // This module contains the platform-specific code. This makes the rest of the
29 // code less dependent on operating system, compilers and runtime libraries.
30 // This module does specifically not deal with differences between different
31 // processor architectures.
32 // The platform classes have the same definition for all platforms. The
33 // implementation for a particular platform is put in platform_<os>.cc.
34 // The build system then uses the implementation for the target platform.
36 // This design has been chosen because it is simple and fast. Alternatively,
37 // the platform dependent classes could have been implemented using abstract
38 // superclasses with virtual methods and having specializations for each
39 // platform. This design was rejected because it was more complicated and
40 // slower. It would require factory methods for selecting the right
41 // implementation and the overhead of virtual methods for performance
42 // sensitive operations like mutex locking/unlocking.
44 #ifndef V8_PLATFORM_H_
45 #define V8_PLATFORM_H_
49 int signbit(double x);
56 // Needed for va_list on at least MinGW and Android.
59 #define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
64 // Windows specific stuff.
67 // Microsoft Visual C++ specific stuff.
70 #include "win32-math.h"
72 int strncasecmp(const char* s1, const char* s2, int n);
76 // Random is missing on both Visual Studio and MinGW.
81 #include "atomicops.h"
82 #include "lazy-instance.h"
83 #include "platform-tls.h"
85 #include "v8globals.h"
90 // Use AtomicWord for a machine-sized pointer. It is assumed that
91 // reads and writes of naturally aligned values of this type are atomic.
92 typedef intptr_t AtomicWord;
97 double ceiling(double x);
98 double modulo(double x, double y);
100 // Custom implementation of sin, cos, tan and log.
101 double fast_sin(double input);
102 double fast_cos(double input);
103 double fast_tan(double input);
104 double fast_log(double input);
105 double fast_sqrt(double input);
107 // Forward declarations.
110 // ----------------------------------------------------------------------------
113 // This class has static methods for the different platform specific
114 // functions. Add methods here to cope with differences between the
115 // supported platforms.
119 // Initializes the platform OS support. Called once at VM startup.
122 // Initializes the platform OS support that depends on CPU features. This is
123 // called after CPU initialization.
124 static void PostSetUp();
126 // Clean up platform-OS-related things. Called once at VM shutdown.
127 static void TearDown();
129 // Returns the accumulated user time for thread. This routine
130 // can be used for profiling. The implementation should
131 // strive for high-precision timer resolution, preferably
132 // micro-second resolution.
133 static int GetUserTime(uint32_t* secs, uint32_t* usecs);
135 // Get a tick counter normalized to one tick per microsecond.
136 // Used for calculating time intervals.
137 static int64_t Ticks();
139 // Returns current time as the number of milliseconds since
140 // 00:00:00 UTC, January 1, 1970.
141 static double TimeCurrentMillis();
143 // Returns a string identifying the current time zone. The
144 // timestamp is used for determining if DST is in effect.
145 static const char* LocalTimezone(double time);
147 // Returns the local time offset in milliseconds east of UTC without
148 // taking daylight saving time into account.
149 static double LocalTimeOffset();
151 // Returns the daylight savings offset for the given time.
152 static double DaylightSavingsOffset(double time);
154 // Returns last OS error.
155 static int GetLastError();
157 static FILE* FOpen(const char* path, const char* mode);
158 static bool Remove(const char* path);
160 // Opens a temporary file, the file is auto removed on close.
161 static FILE* OpenTemporaryFile();
163 // Log file open mode is platform-dependent due to line ends issues.
164 static const char* const LogFileOpenMode;
166 // Print output to console. This is mostly used for debugging output.
167 // On platforms that have standard terminal output, the output
168 // should go to stdout.
169 static void Print(const char* format, ...);
170 static void VPrint(const char* format, va_list args);
172 // Print output to a file. This is mostly used for debugging output.
173 static void FPrint(FILE* out, const char* format, ...);
174 static void VFPrint(FILE* out, const char* format, va_list args);
176 // Print error output to console. This is mostly used for error message
177 // output. On platforms that have standard terminal output, the output
178 // should go to stderr.
179 static void PrintError(const char* format, ...);
180 static void VPrintError(const char* format, va_list args);
182 // Allocate/Free memory used by JS heap. Pages are readable/writable, but
183 // they are not guaranteed to be executable unless 'executable' is true.
184 // Returns the address of allocated memory, or NULL if failed.
185 static void* Allocate(const size_t requested,
188 static void Free(void* address, const size_t size);
190 // This is the granularity at which the ProtectCode(...) call can set page
192 static intptr_t CommitPageSize();
194 // Mark code segments non-writable.
195 static void ProtectCode(void* address, const size_t size);
197 // Assign memory as a guard page so that access will cause an exception.
198 static void Guard(void* address, const size_t size);
200 // Generate a random address to be used for hinting mmap().
201 static void* GetRandomMmapAddr();
203 // Get the alignment guaranteed by Allocate().
204 static size_t AllocateAlignment();
206 // Returns an indication of whether a pointer is in a space that
207 // has been allocated by Allocate(). This method may conservatively
208 // always return false, but giving more accurate information may
209 // improve the robustness of the stack dump code in the presence of
211 static bool IsOutsideAllocatedSpace(void* pointer);
213 // Sleep for a number of milliseconds.
214 static void Sleep(const int milliseconds);
216 // Abort the current process.
220 static void DebugBreak();
223 static const int kStackWalkError = -1;
224 static const int kStackWalkMaxNameLen = 256;
225 static const int kStackWalkMaxTextLen = 256;
228 char text[kStackWalkMaxTextLen];
231 static int StackWalk(Vector<StackFrame> frames);
233 // Factory method for creating platform dependent Mutex.
234 // Please use delete to reclaim the storage for the returned Mutex.
235 static Mutex* CreateMutex();
237 // Factory method for creating platform dependent Semaphore.
238 // Please use delete to reclaim the storage for the returned Semaphore.
239 static Semaphore* CreateSemaphore(int count);
241 // Factory method for creating platform dependent Socket.
242 // Please use delete to reclaim the storage for the returned Socket.
243 static Socket* CreateSocket();
245 class MemoryMappedFile {
247 static MemoryMappedFile* open(const char* name);
248 static MemoryMappedFile* create(const char* name, int size, void* initial);
249 virtual ~MemoryMappedFile() { }
250 virtual void* memory() = 0;
251 virtual int size() = 0;
254 // Safe formatting print. Ensures that str is always null-terminated.
255 // Returns the number of chars written, or -1 if output was truncated.
256 static int SNPrintF(Vector<char> str, const char* format, ...);
257 static int VSNPrintF(Vector<char> str,
261 static char* StrChr(char* str, int c);
262 static void StrNCpy(Vector<char> dest, const char* src, size_t n);
264 // Support for the profiler. Can do nothing, in which case ticks
265 // occurring in shared libraries will not be properly accounted for.
266 static void LogSharedLibraryAddresses();
268 // Support for the profiler. Notifies the external profiling
269 // process that a code moving garbage collection starts. Can do
270 // nothing, in which case the code objects must not move (e.g., by
271 // using --never-compact) if accurate profiling is desired.
272 static void SignalCodeMovingGC();
274 // The return value indicates the CPU features we are sure of because of the
275 // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
277 // This is a little messy because the interpretation is subject to the cross
278 // of the CPU and the OS. The bits in the answer correspond to the bit
279 // positions indicated by the members of the CpuFeature enum from globals.h
280 static uint64_t CpuFeaturesImpliedByPlatform();
282 // Maximum size of the virtual memory. 0 means there is no artificial
284 static intptr_t MaxVirtualMemory();
286 // Returns the double constant NAN
287 static double nan_value();
289 // Support runtime detection of VFP3 on ARM CPUs.
290 static bool ArmCpuHasFeature(CpuFeature feature);
292 // Support runtime detection of whether the hard float option of the
294 static bool ArmUsingHardFloat();
296 // Support runtime detection of FPU on MIPS CPUs.
297 static bool MipsCpuHasFeature(CpuFeature feature);
299 // Returns the activation frame alignment constraint or zero if
300 // the platform doesn't care. Guaranteed to be a power of two.
301 static int ActivationFrameAlignment();
303 static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
305 #if defined(V8_TARGET_ARCH_IA32)
306 // Copy memory area to disjoint memory area.
307 static void MemCopy(void* dest, const void* src, size_t size);
308 // Limit below which the extra overhead of the MemCopy function is likely
309 // to outweigh the benefits of faster copying.
310 static const int kMinComplexMemCopy = 64;
311 typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
313 #else // V8_TARGET_ARCH_IA32
314 static void MemCopy(void* dest, const void* src, size_t size) {
315 memcpy(dest, src, size);
317 static const int kMinComplexMemCopy = 256;
318 #endif // V8_TARGET_ARCH_IA32
321 static const int msPerSecond = 1000;
323 DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
326 // Represents and controls an area of reserved memory.
327 // Control of the reserved memory can be assigned to another VirtualMemory
328 // object by assignment or copy-constructing. This removes the reserved memory
329 // from the original object.
330 class VirtualMemory {
332 // Empty VirtualMemory object, controlling no reserved memory.
335 // Reserves virtual memory with size.
336 explicit VirtualMemory(size_t size);
338 // Reserves virtual memory containing an area of the given size that
339 // is aligned per alignment. This may not be at the position returned
341 VirtualMemory(size_t size, size_t alignment);
343 // Releases the reserved memory, if any, controlled by this VirtualMemory
347 // Returns whether the memory has been reserved.
350 // Initializes or resets an embedded VirtualMemory object.
353 // Returns the start address of the reserved memory.
354 // If the memory was reserved with an alignment, this address is not
355 // necessarily aligned. The user might need to round it up to a multiple of
356 // the alignment to get the start of the aligned block.
358 ASSERT(IsReserved());
362 // Returns the size of the reserved memory. The returned value is only
363 // meaningful when IsReserved() returns true.
364 // If the memory was reserved with an alignment, this size may be larger
365 // than the requested size.
366 size_t size() { return size_; }
368 // Commits real memory. Returns whether the operation succeeded.
369 bool Commit(void* address, size_t size, bool is_executable);
371 // Uncommit real memory. Returns whether the operation succeeded.
372 bool Uncommit(void* address, size_t size);
374 // Creates a single guard page at the given address.
375 bool Guard(void* address);
378 ASSERT(IsReserved());
379 // Notice: Order is important here. The VirtualMemory object might live
380 // inside the allocated region.
381 void* address = address_;
384 bool result = ReleaseRegion(address, size);
389 // Assign control of the reserved region to a different VirtualMemory object.
390 // The old object is no longer functional (IsReserved() returns false).
391 void TakeControl(VirtualMemory* from) {
392 ASSERT(!IsReserved());
393 address_ = from->address_;
398 static void* ReserveRegion(size_t size);
400 static bool CommitRegion(void* base, size_t size, bool is_executable);
402 static bool UncommitRegion(void* base, size_t size);
404 // Must be called with a base pointer that has been returned by ReserveRegion
405 // and the same size it was reserved with.
406 static bool ReleaseRegion(void* base, size_t size);
409 void* address_; // Start address of the virtual memory.
410 size_t size_; // Size of the virtual memory.
414 // ----------------------------------------------------------------------------
417 // Thread objects are used for creating and running threads. When the start()
418 // method is called the new thread starts running the run() method in the new
419 // thread. The Thread object should not be deallocated before the thread has
424 // Opaque data type for thread-local storage keys.
425 // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
426 // to ensure that enumeration type has correct value range (see Issue 830 for
428 enum LocalStorageKey {
429 LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
430 LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
435 Options() : name_("v8:<unknown>"), stack_size_(0) {}
436 Options(const char* name, int stack_size = 0)
437 : name_(name), stack_size_(stack_size) {}
439 const char* name() const { return name_; }
440 int stack_size() const { return stack_size_; }
447 // Create new thread.
448 explicit Thread(const Options& options);
451 // Start new thread by calling the Run() method in the new thread.
454 // Wait until thread terminates.
457 inline const char* name() const {
461 // Abstract method for run handler.
462 virtual void Run() = 0;
464 // Thread-local storage.
465 static LocalStorageKey CreateThreadLocalKey();
466 static void DeleteThreadLocalKey(LocalStorageKey key);
467 static void* GetThreadLocal(LocalStorageKey key);
468 static int GetThreadLocalInt(LocalStorageKey key) {
469 return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
471 static void SetThreadLocal(LocalStorageKey key, void* value);
472 static void SetThreadLocalInt(LocalStorageKey key, int value) {
473 SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
475 static bool HasThreadLocal(LocalStorageKey key) {
476 return GetThreadLocal(key) != NULL;
479 #ifdef V8_FAST_TLS_SUPPORTED
480 static inline void* GetExistingThreadLocal(LocalStorageKey key) {
481 void* result = reinterpret_cast<void*>(
482 InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
483 ASSERT(result == GetThreadLocal(key));
487 static inline void* GetExistingThreadLocal(LocalStorageKey key) {
488 return GetThreadLocal(key);
492 // A hint to the scheduler to let another thread run.
493 static void YieldCPU();
496 // The thread name length is limited to 16 based on Linux's implementation of
498 static const int kMaxThreadNameLength = 16;
501 PlatformData* data() { return data_; }
504 void set_name(const char* name);
508 char name_[kMaxThreadNameLength];
511 DISALLOW_COPY_AND_ASSIGN(Thread);
515 // ----------------------------------------------------------------------------
518 // Mutexes are used for serializing access to non-reentrant sections of code.
519 // The implementations of mutex should allow for nested/recursive locking.
525 // Locks the given mutex. If the mutex is currently unlocked, it becomes
526 // locked and owned by the calling thread, and immediately. If the mutex
527 // is already locked by another thread, suspends the calling thread until
528 // the mutex is unlocked.
529 virtual int Lock() = 0;
531 // Unlocks the given mutex. The mutex is assumed to be locked and owned by
532 // the calling thread on entrance.
533 virtual int Unlock() = 0;
535 // Tries to lock the given mutex. Returns whether the mutex was
536 // successfully locked.
537 virtual bool TryLock() = 0;
540 struct CreateMutexTrait {
541 static Mutex* Create() {
542 return OS::CreateMutex();
546 // POD Mutex initialized lazily (i.e. the first time Pointer() is called).
548 // static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
550 // void my_function() {
551 // ScopedLock my_lock(my_mutex.Pointer());
555 typedef LazyDynamicInstance<
556 Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
558 #define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
560 // ----------------------------------------------------------------------------
563 // Stack-allocated ScopedLocks provide block-scoped locking and
564 // unlocking of a mutex.
567 explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
568 ASSERT(mutex_ != NULL);
577 DISALLOW_COPY_AND_ASSIGN(ScopedLock);
581 // ----------------------------------------------------------------------------
584 // A semaphore object is a synchronization object that maintains a count. The
585 // count is decremented each time a thread completes a wait for the semaphore
586 // object and incremented each time a thread signals the semaphore. When the
587 // count reaches zero, threads waiting for the semaphore block until the
588 // count becomes non-zero.
592 virtual ~Semaphore() {}
594 // Suspends the calling thread until the semaphore counter is non zero
595 // and then decrements the semaphore counter.
596 virtual void Wait() = 0;
598 // Suspends the calling thread until the counter is non zero or the timeout
599 // time has passed. If timeout happens the return value is false and the
600 // counter is unchanged. Otherwise the semaphore counter is decremented and
601 // true is returned. The timeout value is specified in microseconds.
602 virtual bool Wait(int timeout) = 0;
604 // Increments the semaphore counter.
605 virtual void Signal() = 0;
608 template <int InitialValue>
609 struct CreateSemaphoreTrait {
610 static Semaphore* Create() {
611 return OS::CreateSemaphore(InitialValue);
615 // POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
617 // // The following semaphore starts at 0.
618 // static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
620 // void my_function() {
621 // // Do something with my_semaphore.Pointer().
624 template <int InitialValue>
625 struct LazySemaphore {
626 typedef typename LazyDynamicInstance<
627 Semaphore, CreateSemaphoreTrait<InitialValue>,
628 ThreadSafeInitOnceTrait>::type type;
631 #define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
634 // ----------------------------------------------------------------------------
642 // Server initialization.
643 virtual bool Bind(const int port) = 0;
644 virtual bool Listen(int backlog) const = 0;
645 virtual Socket* Accept() const = 0;
647 // Client initialization.
648 virtual bool Connect(const char* host, const char* port) = 0;
650 // Shutdown socket for both read and write. This causes blocking Send and
651 // Receive calls to exit. After Shutdown the Socket object cannot be used for
652 // any communication.
653 virtual bool Shutdown() = 0;
655 // Data transmission.
656 // Return 0 on failure.
657 virtual int Send(const char* data, int len) const = 0;
658 virtual int Receive(char* data, int len) const = 0;
660 // Set the value of the SO_REUSEADDR socket option.
661 virtual bool SetReuseAddress(bool reuse_address) = 0;
663 virtual bool IsValid() const = 0;
666 static int LastError();
667 static uint16_t HToN(uint16_t value);
668 static uint16_t NToH(uint16_t value);
669 static uint32_t HToN(uint32_t value);
670 static uint32_t NToH(uint32_t value);
674 // ----------------------------------------------------------------------------
677 // A sampler periodically samples the state of the VM and optionally
678 // (if used for profiling) the program counter and stack pointer for
679 // the thread that created it.
681 // TickSample captures the information collected for each sample.
691 has_external_callback(false) {}
692 StateTag state; // The state of the VM.
693 Address pc; // Instruction pointer.
694 Address sp; // Stack pointer.
695 Address fp; // Frame pointer.
697 Address tos; // Top stack value (*sp).
698 Address external_callback;
700 static const int kMaxFramesCount = 64;
701 Address stack[kMaxFramesCount]; // Call stack.
702 int frames_count : 8; // Number of captured frames.
703 bool has_external_callback : 1;
708 // Initialize sampler.
709 Sampler(Isolate* isolate, int interval);
712 int interval() const { return interval_; }
714 // Performs stack sampling.
715 void SampleStack(TickSample* sample) {
716 DoSampleStack(sample);
720 // This method is called for each sampling period with the current
722 virtual void Tick(TickSample* sample) = 0;
724 // Start and stop sampler.
728 // Is the sampler used for profiling?
729 bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
730 void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
731 void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
733 // Whether the sampler is running (that is, consumes resources).
734 bool IsActive() const { return NoBarrier_Load(&active_); }
736 Isolate* isolate() { return isolate_; }
738 // Used in tests to make sure that stack sampling is performed.
739 int samples_taken() const { return samples_taken_; }
740 void ResetSamplesTaken() { samples_taken_ = 0; }
743 PlatformData* data() { return data_; }
745 PlatformData* platform_data() { return data_; }
748 virtual void DoSampleStack(TickSample* sample) = 0;
751 void SetActive(bool value) { NoBarrier_Store(&active_, value); }
752 void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
758 PlatformData* data_; // Platform specific data.
759 int samples_taken_; // Counts stack samples taken.
760 DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
764 } } // namespace v8::internal
766 #endif // V8_PLATFORM_H_