1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 // This module contains the platform-specific code. This make the rest of the
29 // code less dependent on operating system, compilers and runtime libraries.
// This module specifically does not deal with differences between different
// processor architectures.
32 // The platform classes have the same definition for all platforms. The
33 // implementation for a particular platform is put in platform_<os>.cc.
34 // The build system then uses the implementation for the target platform.
36 // This design has been chosen because it is simple and fast. Alternatively,
37 // the platform dependent classes could have been implemented using abstract
38 // superclasses with virtual methods and having specializations for each
39 // platform. This design was rejected because it was more complicated and
40 // slower. It would require factory methods for selecting the right
41 // implementation and the overhead of virtual methods for performance
// sensitive operations like mutex locking/unlocking.
44 #ifndef V8_PLATFORM_H_
45 #define V8_PLATFORM_H_
49 #include "platform/mutex.h"
50 #include "platform/semaphore.h"
52 #include "v8globals.h"
57 int signbit(double x);
66 // Microsoft Visual C++ specific stuff.
69 #include "win32-headers.h"
70 #include "win32-math.h"
72 int strncasecmp(const char* s1, const char* s2, int n);
74 // Visual C++ 2013 and higher implement this function.
76 inline int lrint(double flt) {
78 #if V8_TARGET_ARCH_IA32
84 intgr = static_cast<int>(flt + 0.5);
85 if ((intgr & 1) != 0 && intgr - flt == 0.5) {
86 // If the number is halfway between two integers, round to the even one.
92 #endif // _MSC_VER < 1800
94 #endif // V8_LIBC_MSVCRT
99 double modulo(double x, double y);
101 // Custom implementation of math functions.
102 double fast_exp(double input);
103 double fast_sqrt(double input);
104 // The custom exp implementation needs 16KB of lookup data; initialize it
106 void lazily_initialize_fast_exp();
108 // ----------------------------------------------------------------------------
111 #ifndef V8_NO_FAST_TLS
113 #if defined(_MSC_VER) && V8_HOST_ARCH_IA32
115 #define V8_FAST_TLS_SUPPORTED 1
117 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
119 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
120 const intptr_t kTibInlineTlsOffset = 0xE10;
121 const intptr_t kTibExtraTlsOffset = 0xF94;
122 const intptr_t kMaxInlineSlots = 64;
123 const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
124 ASSERT(0 <= index && index < kMaxSlots);
125 if (index < kMaxInlineSlots) {
126 return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
127 kPointerSize * index));
129 intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
131 return *reinterpret_cast<intptr_t*>(extra +
132 kPointerSize * (index - kMaxInlineSlots));
135 #elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
137 #define V8_FAST_TLS_SUPPORTED 1
139 extern intptr_t kMacTlsBaseOffset;
141 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
143 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
145 #if V8_HOST_ARCH_IA32
146 asm("movl %%gs:(%1,%2,4), %0;"
147 :"=r"(result) // Output must be a writable register.
148 :"r"(kMacTlsBaseOffset), "r"(index));
150 asm("movq %%gs:(%1,%2,8), %0;"
152 :"r"(kMacTlsBaseOffset), "r"(index));
159 #endif // V8_NO_FAST_TLS
162 // ----------------------------------------------------------------------------
165 // This class has static methods for the different platform specific
166 // functions. Add methods here to cope with differences between the
167 // supported platforms.
171 // Initializes the platform OS support that depend on CPU features. This is
172 // called after CPU initialization.
173 static void PostSetUp();
175 // Returns the accumulated user time for thread. This routine
176 // can be used for profiling. The implementation should
177 // strive for high-precision timer resolution, preferable
178 // micro-second resolution.
179 static int GetUserTime(uint32_t* secs, uint32_t* usecs);
181 // Returns current time as the number of milliseconds since
182 // 00:00:00 UTC, January 1, 1970.
183 static double TimeCurrentMillis();
185 // Returns a string identifying the current time zone. The
186 // timestamp is used for determining if DST is in effect.
187 static const char* LocalTimezone(double time);
189 // Returns the local time offset in milliseconds east of UTC without
190 // taking daylight savings time into account.
191 static double LocalTimeOffset();
193 // Returns the daylight savings offset for the given time.
194 static double DaylightSavingsOffset(double time);
196 // Returns last OS error.
197 static int GetLastError();
199 static FILE* FOpen(const char* path, const char* mode);
200 static bool Remove(const char* path);
202 // Opens a temporary file, the file is auto removed on close.
203 static FILE* OpenTemporaryFile();
205 // Log file open mode is platform-dependent due to line ends issues.
206 static const char* const LogFileOpenMode;
208 // Print output to console. This is mostly used for debugging output.
209 // On platforms that has standard terminal output, the output
210 // should go to stdout.
211 static void Print(const char* format, ...);
212 static void VPrint(const char* format, va_list args);
214 // Print output to a file. This is mostly used for debugging output.
215 static void FPrint(FILE* out, const char* format, ...);
216 static void VFPrint(FILE* out, const char* format, va_list args);
218 // Print error output to console. This is mostly used for error message
219 // output. On platforms that has standard terminal output, the output
220 // should go to stderr.
221 static void PrintError(const char* format, ...);
222 static void VPrintError(const char* format, va_list args);
224 // Allocate/Free memory used by JS heap. Pages are readable/writable, but
225 // they are not guaranteed to be executable unless 'executable' is true.
226 // Returns the address of allocated memory, or NULL if failed.
227 static void* Allocate(const size_t requested,
230 static void Free(void* address, const size_t size);
232 // This is the granularity at which the ProtectCode(...) call can set page
234 static intptr_t CommitPageSize();
236 // Mark code segments non-writable.
237 static void ProtectCode(void* address, const size_t size);
239 // Assign memory as a guard page so that access will cause an exception.
240 static void Guard(void* address, const size_t size);
242 // Generate a random address to be used for hinting mmap().
243 static void* GetRandomMmapAddr();
245 // Get the Alignment guaranteed by Allocate().
246 static size_t AllocateAlignment();
248 // Sleep for a number of milliseconds.
249 static void Sleep(const int milliseconds);
251 // Abort the current process.
255 static void DebugBreak();
258 static const int kStackWalkError = -1;
259 static const int kStackWalkMaxNameLen = 256;
260 static const int kStackWalkMaxTextLen = 256;
263 char text[kStackWalkMaxTextLen];
266 class MemoryMappedFile {
268 static MemoryMappedFile* open(const char* name);
269 static MemoryMappedFile* create(const char* name, int size, void* initial);
270 virtual ~MemoryMappedFile() { }
271 virtual void* memory() = 0;
272 virtual int size() = 0;
275 // Safe formatting print. Ensures that str is always null-terminated.
276 // Returns the number of chars written, or -1 if output was truncated.
277 static int SNPrintF(Vector<char> str, const char* format, ...);
278 static int VSNPrintF(Vector<char> str,
282 static char* StrChr(char* str, int c);
283 static void StrNCpy(Vector<char> dest, const char* src, size_t n);
// Support for the profiler. Can do nothing, in which case ticks
// occurring in shared libraries will not be properly accounted for.
287 static void LogSharedLibraryAddresses(Isolate* isolate);
289 // Support for the profiler. Notifies the external profiling
290 // process that a code moving garbage collection starts. Can do
291 // nothing, in which case the code objects must not move (e.g., by
292 // using --never-compact) if accurate profiling is desired.
293 static void SignalCodeMovingGC();
295 // The return value indicates the CPU features we are sure of because of the
296 // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
298 // This is a little messy because the interpretation is subject to the cross
299 // of the CPU and the OS. The bits in the answer correspond to the bit
300 // positions indicated by the members of the CpuFeature enum from globals.h
301 static uint64_t CpuFeaturesImpliedByPlatform();
303 // The total amount of physical memory available on the current system.
304 static uint64_t TotalPhysicalMemory();
306 // Maximum size of the virtual memory. 0 means there is no artificial
308 static intptr_t MaxVirtualMemory();
310 // Returns the double constant NAN
311 static double nan_value();
313 // Support runtime detection of whether the hard float option of the
315 static bool ArmUsingHardFloat();
317 // Returns the activation frame alignment constraint or zero if
318 // the platform doesn't care. Guaranteed to be a power of two.
319 static int ActivationFrameAlignment();
321 #if defined(V8_TARGET_ARCH_IA32)
322 // Limit below which the extra overhead of the MemCopy function is likely
323 // to outweigh the benefits of faster copying.
324 static const int kMinComplexMemCopy = 64;
326 // Copy memory area. No restrictions.
327 static void MemMove(void* dest, const void* src, size_t size);
328 typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
330 // Keep the distinction of "move" vs. "copy" for the benefit of other
332 static void MemCopy(void* dest, const void* src, size_t size) {
333 MemMove(dest, src, size);
335 #elif defined(V8_HOST_ARCH_ARM)
336 typedef void (*MemCopyUint8Function)(uint8_t* dest,
339 static MemCopyUint8Function memcopy_uint8_function;
340 static void MemCopyUint8Wrapper(uint8_t* dest,
343 memcpy(dest, src, chars);
345 // For values < 16, the assembler function is slower than the inlined C code.
346 static const int kMinComplexMemCopy = 16;
347 static void MemCopy(void* dest, const void* src, size_t size) {
348 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
349 reinterpret_cast<const uint8_t*>(src),
352 static void MemMove(void* dest, const void* src, size_t size) {
353 memmove(dest, src, size);
356 typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
359 static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
360 static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
363 // For values < 12, the assembler function is slower than the inlined C code.
364 static const int kMinComplexConvertMemCopy = 12;
365 static void MemCopyUint16Uint8(uint16_t* dest,
368 (*memcopy_uint16_uint8_function)(dest, src, size);
370 #elif defined(V8_HOST_ARCH_MIPS)
371 typedef void (*MemCopyUint8Function)(uint8_t* dest,
374 static MemCopyUint8Function memcopy_uint8_function;
375 static void MemCopyUint8Wrapper(uint8_t* dest,
378 memcpy(dest, src, chars);
380 // For values < 16, the assembler function is slower than the inlined C code.
381 static const int kMinComplexMemCopy = 16;
382 static void MemCopy(void* dest, const void* src, size_t size) {
383 (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
384 reinterpret_cast<const uint8_t*>(src),
387 static void MemMove(void* dest, const void* src, size_t size) {
388 memmove(dest, src, size);
391 // Copy memory area to disjoint memory area.
392 static void MemCopy(void* dest, const void* src, size_t size) {
393 memcpy(dest, src, size);
395 static void MemMove(void* dest, const void* src, size_t size) {
396 memmove(dest, src, size);
398 static const int kMinComplexMemCopy = 16 * kPointerSize;
399 #endif // V8_TARGET_ARCH_IA32
401 static int GetCurrentProcessId();
404 static const int msPerSecond = 1000;
406 DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-constructing. This removes the reserved memory
// from the original object.
413 class VirtualMemory {
415 // Empty VirtualMemory object, controlling no reserved memory.
418 // Reserves virtual memory with size.
419 explicit VirtualMemory(size_t size);
421 // Reserves virtual memory containing an area of the given size that
422 // is aligned per alignment. This may not be at the position returned
424 VirtualMemory(size_t size, size_t alignment);
426 // Releases the reserved memory, if any, controlled by this VirtualMemory
430 // Returns whether the memory has been reserved.
433 // Initialize or resets an embedded VirtualMemory object.
436 // Returns the start address of the reserved memory.
437 // If the memory was reserved with an alignment, this address is not
438 // necessarily aligned. The user might need to round it up to a multiple of
439 // the alignment to get the start of the aligned block.
441 ASSERT(IsReserved());
445 // Returns the size of the reserved memory. The returned value is only
446 // meaningful when IsReserved() returns true.
447 // If the memory was reserved with an alignment, this size may be larger
448 // than the requested size.
449 size_t size() { return size_; }
451 // Commits real memory. Returns whether the operation succeeded.
452 bool Commit(void* address, size_t size, bool is_executable);
454 // Uncommit real memory. Returns whether the operation succeeded.
455 bool Uncommit(void* address, size_t size);
457 // Creates a single guard page at the given address.
458 bool Guard(void* address);
461 ASSERT(IsReserved());
462 // Notice: Order is important here. The VirtualMemory object might live
463 // inside the allocated region.
464 void* address = address_;
467 bool result = ReleaseRegion(address, size);
472 // Assign control of the reserved region to a different VirtualMemory object.
473 // The old object is no longer functional (IsReserved() returns false).
474 void TakeControl(VirtualMemory* from) {
475 ASSERT(!IsReserved());
476 address_ = from->address_;
481 static void* ReserveRegion(size_t size);
483 static bool CommitRegion(void* base, size_t size, bool is_executable);
485 static bool UncommitRegion(void* base, size_t size);
487 // Must be called with a base pointer that has been returned by ReserveRegion
488 // and the same size it was reserved with.
489 static bool ReleaseRegion(void* base, size_t size);
491 // Returns true if OS performs lazy commits, i.e. the memory allocation call
492 // defers actual physical memory allocation till the first memory access.
493 // Otherwise returns false.
494 static bool HasLazyCommits();
497 void* address_; // Start address of the virtual memory.
498 size_t size_; // Size of the virtual memory.
502 // ----------------------------------------------------------------------------
505 // Thread objects are used for creating and running threads. When the start()
506 // method is called the new thread starts running the run() method in the new
507 // thread. The Thread object should not be deallocated before the thread has
512 // Opaque data type for thread-local storage keys.
513 // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
514 // to ensure that enumeration type has correct value range (see Issue 830 for
516 enum LocalStorageKey {
517 LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
518 LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
523 Options() : name_("v8:<unknown>"), stack_size_(0) {}
524 Options(const char* name, int stack_size = 0)
525 : name_(name), stack_size_(stack_size) {}
527 const char* name() const { return name_; }
528 int stack_size() const { return stack_size_; }
535 // Create new thread.
536 explicit Thread(const Options& options);
539 // Start new thread by calling the Run() method on the new thread.
542 // Start new thread and wait until Run() method is called on the new thread.
543 void StartSynchronously() {
544 start_semaphore_ = new Semaphore(0);
546 start_semaphore_->Wait();
547 delete start_semaphore_;
548 start_semaphore_ = NULL;
551 // Wait until thread terminates.
554 inline const char* name() const {
558 // Abstract method for run handler.
559 virtual void Run() = 0;
561 // Thread-local storage.
562 static LocalStorageKey CreateThreadLocalKey();
563 static void DeleteThreadLocalKey(LocalStorageKey key);
564 static void* GetThreadLocal(LocalStorageKey key);
565 static int GetThreadLocalInt(LocalStorageKey key) {
566 return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
568 static void SetThreadLocal(LocalStorageKey key, void* value);
569 static void SetThreadLocalInt(LocalStorageKey key, int value) {
570 SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
572 static bool HasThreadLocal(LocalStorageKey key) {
573 return GetThreadLocal(key) != NULL;
576 #ifdef V8_FAST_TLS_SUPPORTED
577 static inline void* GetExistingThreadLocal(LocalStorageKey key) {
578 void* result = reinterpret_cast<void*>(
579 InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
580 ASSERT(result == GetThreadLocal(key));
584 static inline void* GetExistingThreadLocal(LocalStorageKey key) {
585 return GetThreadLocal(key);
589 // A hint to the scheduler to let another thread run.
590 static void YieldCPU();
593 // The thread name length is limited to 16 based on Linux's implementation of
595 static const int kMaxThreadNameLength = 16;
598 PlatformData* data() { return data_; }
600 void NotifyStartedAndRun() {
601 if (start_semaphore_) start_semaphore_->Signal();
606 void set_name(const char* name);
610 char name_[kMaxThreadNameLength];
612 Semaphore* start_semaphore_;
614 DISALLOW_COPY_AND_ASSIGN(Thread);
617 } } // namespace v8::internal
619 #endif // V8_PLATFORM_H_