1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Platform specific code for MacOS goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
#include <dlfcn.h>
#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <AvailabilityMacros.h>

#include <pthread.h>
#include <semaphore.h>

#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "platform-posix.h"

#include "vm-state-inl.h"
// Manually define these here as weak imports, rather than including execinfo.h.
// This lets us launch on 10.4 which does not have these calls.
// Because they are weak imports, each symbol may resolve to NULL at runtime
// and must be NULL-checked before use (see OS::StackWalk below).
extern int backtrace(void**, int) __attribute__((weak_import));
extern char** backtrace_symbols(void* const*, int)
    __attribute__((weak_import));
extern void backtrace_symbols_fd(void* const*, int, int)
    __attribute__((weak_import));
// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer, so 0 can serve as the "no thread" sentinel.
static const pthread_t kNoThread = (pthread_t) 0;
// Ceiling function with a workaround for Mac OS X Leopard, whose ceil()
// returns +0.0 for inputs in (-1, 0) instead of the correctly signed -0.0.
// For all other inputs this is equivalent to the C library ceil().
double ceiling(double x) {
  // Correct Mac OS X Leopard 'ceil' behavior.
  if (-1.0 < x && x < 0.0) {
    return -0.0;  // Preserve the sign of zero.
  } else {
    return ceil(x);
  }
}
// Guards the lowest/highest_ever_allocated globals below (see
// UpdateAllocatedSpaceLimits). Created in OS::SetUp.
static Mutex* limit_mutex = NULL;


// Platform initialization that must run after OS::SetUp.
// NOTE(review): the body of this function is not visible in this listing.
void OS::PostSetUp() {
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
111 static void UpdateAllocatedSpaceLimits(void* address, int size) {
112 ASSERT(limit_mutex != NULL);
113 ScopedLock lock(limit_mutex);
115 lowest_ever_allocated = Min(lowest_ever_allocated, address);
116 highest_ever_allocated =
117 Max(highest_ever_allocated,
118 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
122 bool OS::IsOutsideAllocatedSpace(void* address) {
123 return address < lowest_ever_allocated || address >= highest_ever_allocated;
127 size_t OS::AllocateAlignment() {
128 return getpagesize();
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
// Allocates page-rounded read/write (optionally executable) memory via mmap
// and records it in the allocated-space envelope.
// NOTE(review): lines are missing from this listing — at least a middle
// signature parameter, several mmap() arguments, and the error/success
// return statements are not visible here.
void* OS::Allocate(const size_t requested,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  // mmap() call — remaining arguments not visible in this listing.
  void* mbase = mmap(OS::GetRandomMmapAddr(),
                     MAP_PRIVATE | MAP_ANON,
  if (mbase == MAP_FAILED) {
    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
  UpdateAllocatedSpaceLimits(mbase, msize);
161 void OS::Free(void* address, const size_t size) {
162 // TODO(1240712): munmap has a return value which is ignored here.
163 int result = munmap(address, size);
169 void OS::Sleep(int milliseconds) {
170 usleep(1000 * milliseconds);
// Redirect to std abort to signal abnormal program termination
// NOTE(review): the OS::Abort() definition this comment belongs to is not
// visible in this listing.


// Traps into the debugger.
// NOTE(review): body not visible in this listing.
void OS::DebugBreak() {
// Pairs a stdio FILE* with an mmap'ed view of its contents; the destructor
// (defined below) releases both.
// NOTE(review): lines are missing from this listing — the access-specifier
// labels and the file_/memory_/size_ member declarations plus the closing
// brace are not visible here.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
// Opens an existing file read/write and maps its entire contents.
// Returns NULL if the file cannot be opened.
// NOTE(review): lines are missing from this listing — the assignment of the
// mmap() result to 'memory' and several mmap() arguments are not visible.
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

      mmap(OS::GetRandomMmapAddr(),
           PROT_READ | PROT_WRITE,
  return new PosixMemoryMappedFile(file, memory, size);
// Creates (truncates) a file, writes |size| bytes of initial content, and
// maps it. Returns NULL if the file cannot be created.
// NOTE(review): lines are missing from this listing — the trailing signature
// parameter, the use of 'result', the assignment of the mmap() result to
// 'memory', and several mmap() arguments are not visible.
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
      mmap(OS::GetRandomMmapAddr(),
           PROT_READ | PROT_WRITE,
  return new PosixMemoryMappedFile(file, memory, size);
237 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
238 if (memory_) OS::Free(memory_, size_);
// Logs the [start, end) code range of every loaded Mach-O image so the
// profiler can symbolize addresses in shared libraries.
// NOTE(review): lines are missing from this listing — the #if/#else/#endif
// that selects between the 64-bit and 32-bit getsectdatafromheader variants,
// the 'size' declaration, the remaining getsectdatafromheader_64 arguments,
// and the loop's closing braces are not visible here.
void OS::LogSharedLibraryAddresses() {
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
    if (code_ptr == NULL) continue;
    // Slide is the ASLR offset applied to the image at load time.
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(Isolate::Current(),
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
268 void OS::SignalCodeMovingGC() {
272 uint64_t OS::CpuFeaturesImpliedByPlatform() {
273 // MacOSX requires all these to install so we can assume they are present.
274 // These constants are defined by the CPUid instructions.
275 const uint64_t one = 1;
276 return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
280 int OS::ActivationFrameAlignment() {
281 // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
282 // Function Call Guide".
287 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
293 const char* OS::LocalTimezone(double time) {
294 if (isnan(time)) return "";
295 time_t tv = static_cast<time_t>(floor(time/msPerSecond));
296 struct tm* t = localtime(&tv);
297 if (NULL == t) return "";
302 double OS::LocalTimeOffset() {
303 time_t tv = time(NULL);
304 struct tm* t = localtime(&tv);
305 // tm_gmtoff includes any daylight savings offset, so subtract it.
306 return static_cast<double>(t->tm_gmtoff * msPerSecond -
307 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
// Captures up to frames.length() native stack frames with symbol names.
// Relies on the weakly-imported backtrace()/backtrace_symbols() declared at
// the top of this file, so it degrades gracefully on 10.4.
// NOTE(review): lines are missing from this listing — the early-return for a
// NULL backtrace symbol, the SNPrintF format arguments, the free() of
// 'symbols', the final return of frames_count, and several closing braces
// are not visible here.
int OS::StackWalk(Vector<StackFrame> frames) {
  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
  if (backtrace == NULL)
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    SNPrintF(MutableCStrVector(frames[i].text,
                               kStackWalkMaxTextLen),
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
344 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
347 VirtualMemory::VirtualMemory(size_t size)
348 : address_(ReserveRegion(size)), size_(size) { }
// Reserves an address range of |size| bytes aligned to |alignment| by
// over-reserving size + alignment, then trimming the unaligned prefix and
// surplus suffix with OS::Free. On failure address_ stays NULL.
// NOTE(review): lines are missing from this listing — several mmap()
// arguments and some closing braces are not visible here.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
// Releases the reservation, if any.
// NOTE(review): lines are missing from this listing — presumably an
// IsReserved() guard around the release and assertions on 'result'; confirm
// against the full source.
VirtualMemory::~VirtualMemory() {
    bool result = ReleaseRegion(address(), size());
400 void VirtualMemory::Reset() {
// Reserves |size| bytes of address space without committing backing store
// (MAP_NORESERVE, no access). Returns NULL on failure.
// NOTE(review): lines are missing from this listing — several mmap()
// arguments and the final return statement are not visible here.
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,

  if (result == MAP_FAILED) return NULL;
420 bool VirtualMemory::IsReserved() {
421 return address_ != NULL;
425 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
426 return CommitRegion(address, size, is_executable);
430 bool VirtualMemory::Guard(void* address) {
431 OS::Guard(address, OS::CommitPageSize());
// Commits backing store for [address, address + size) by re-mmapping the
// range MAP_FIXED with read/write (and optionally execute) protection, then
// records it in the allocated-space envelope.
// NOTE(review): lines are missing from this listing — a middle signature
// parameter, several mmap() arguments, the failure return, and the final
// 'return true;' are not visible here.
bool VirtualMemory::CommitRegion(void* address,
                                 bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,

  UpdateAllocatedSpaceLimits(address, size);
454 bool VirtualMemory::Uncommit(void* address, size_t size) {
455 return UncommitRegion(address, size);
// Drops backing store for [address, address + size) by re-mmapping the range
// MAP_FIXED | MAP_NORESERVE, keeping the address range reserved.
// NOTE(review): lines are missing from this listing — the mmap() call's
// leading arguments (address, size, protection) are not visible here.
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFdOffset) != MAP_FAILED;
469 bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
470 return munmap(address, size) == 0;
// Per-thread platform state: just the pthread handle.
// NOTE(review): the access-specifier label and closing brace of this class
// are not visible in this listing.
class Thread::PlatformData : public Malloced {
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
481 Thread::Thread(const Options& options)
482 : data_(new PlatformData),
483 stack_size_(options.stack_size()) {
484 set_name(options.name());
// Names the calling thread using pthread_setname_np when the running OS
// provides it (10.6+), resolved via dlsym so the binary still loads on
// older systems.
// NOTE(review): lines are missing from this listing — the early 'return;'
// after the NULL check and the closing brace are not visible here.
static void SetThreadName(const char* name) {
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (!dynamic_pthread_setname_np)

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
// pthread entry trampoline: records the pthread handle, names the thread,
// and then hands control to the Thread object.
// NOTE(review): the tail of this function (presumably the call into
// thread->Run() and the return) is not visible in this listing.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  SetThreadName(thread->name());
  ASSERT(thread->data()->thread_ != kNoThread);
523 void Thread::set_name(const char* name) {
524 strncpy(name_, name, sizeof(name_));
525 name_[sizeof(name_) - 1] = '\0';
// Starts the thread, applying stack_size_ as the pthread stack size when
// one was requested.
// NOTE(review): lines are missing from this listing — the 'pthread_attr_t
// attr;' declaration, the assignment of attr_ptr, and closing braces are
// not visible here.
void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
542 void Thread::Join() {
543 pthread_join(data_->thread_, NULL);
#ifdef V8_FAST_TLS_SUPPORTED

// Set (via Release_Store) once kMacTlsBaseOffset has been computed.
static Atomic32 tls_base_offset_initialized = 0;
// Offset of the pthread TLS area from the gs/fs segment base; depends on
// the kernel generation (see InitializeTlsBaseOffset below).
intptr_t kMacTlsBaseOffset = 0;
// It's safe to do the initialization more that once, but it has to be
// done at least once.
// Determines kMacTlsBaseOffset from the kernel major version obtained via
// sysctl(KERN_OSRELEASE), then publishes via Release_Store.
// NOTE(review): lines are missing from this listing — the use of
// 'period_pos', the #else/#endif of the architecture #if, the else-branch
// header for kernels >= 11, and closing braces are not visible here.
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  int kernel_version_major =
      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
#if defined(V8_HOST_ARCH_IA32)
    kMacTlsBaseOffset = 0x48;
    kMacTlsBaseOffset = 0x60;
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;

  Release_Store(&tls_base_offset_initialized, 1);
// Sanity-checks the fast TLS path: a value stored through the normal API
// must be readable through GetExistingThreadLocal (which uses
// kMacTlsBaseOffset); aborts the process otherwise.
// NOTE(review): closing braces and the #endif's preceding brace are not
// visible in this listing.
static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    V8_Fatal(__FILE__, __LINE__,
             "V8 failed to initialize fast TLS on current kernel");
  Thread::SetThreadLocal(key, NULL);

#endif  // V8_FAST_TLS_SUPPORTED
// Creates a pthread TLS key; on the first call (when fast TLS is compiled
// in) also computes the TLS base offset and verifies the fast path works.
// NOTE(review): lines are missing from this listing — the 'pthread_key_t
// key;' declaration, assertions on 'result', the #endif lines, the final
// return, and closing braces are not visible here.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (tls_base_offset_initialized == 0) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  int result = pthread_key_create(&key, NULL);
  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(typed_key);
623 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
624 pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
625 int result = pthread_key_delete(pthread_key);
631 void* Thread::GetThreadLocal(LocalStorageKey key) {
632 pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
633 return pthread_getspecific(pthread_key);
637 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
638 pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
639 pthread_setspecific(pthread_key, value);
643 void Thread::YieldCPU() {
// Recursive pthread mutex implementing the Mutex interface.
// NOTE(review): lines are missing from this listing — the access-specifier
// labels, the constructor header, TryLock's return statements, and closing
// braces are not visible here.
class MacOSMutex : public Mutex {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    // Recursive so the same thread may re-acquire without deadlocking.
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);

  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
    ASSERT(result == 0);  // Verify no other errors.

  pthread_mutex_t mutex_;
677 Mutex* OS::CreateMutex() {
678 return new MacOSMutex();
// Semaphore implemented on Mach semaphores (not POSIX sem_t).
// NOTE(review): lines are missing from this listing — the access-specifier
// labels, the destructor header, and closing braces are not visible here.
class MacOSSemaphore : public Semaphore {
  explicit MacOSSemaphore(int count) {
    semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);

    semaphore_destroy(mach_task_self(), semaphore_);

  // The MacOS mach semaphore documentation claims it does not have spurious
  // wakeups, the way pthreads semaphores do. So the code from the linux
  // platform is not needed here.
  void Wait() { semaphore_wait(semaphore_); }

  bool Wait(int timeout);

  void Signal() { semaphore_signal(semaphore_); }

  semaphore_t semaphore_;
// Waits up to |timeout| (microseconds, judging by the /1000000 split below);
// returns false only on timeout.
// NOTE(review): the declaration of 'ts' (presumably a mach_timespec_t) and
// the closing brace are not visible in this listing.
bool MacOSSemaphore::Wait(int timeout) {
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
714 Semaphore* OS::CreateSemaphore(int count) {
715 return new MacOSSemaphore(count);
// Holds the Mach port of the thread being profiled; the port is acquired at
// construction and released in the destructor.
// NOTE(review): lines are missing from this listing — the access-specifier
// labels, the destructor header, and closing braces are not visible here.
class Sampler::PlatformData : public Malloced {
  PlatformData() : profiled_thread_(mach_thread_self()) {}

    // Deallocate Mach port for thread.
    mach_port_deallocate(mach_task_self(), profiled_thread_);

  thread_act_t profiled_thread() { return profiled_thread_; }

  // Note: for profiled_thread_ Mach primitives are used instead of PThread's
  // because the latter doesn't provide thread manipulation primitives required.
  // For details, consult "Mac OS X Internals" book, Section 7.3.
  thread_act_t profiled_thread_;
// Single process-wide thread that periodically samples all registered
// Samplers (and drives the runtime profiler). Guarded by the static mutex_.
// NOTE(review): lines are missing from this listing — access-specifier
// labels, the instance_->StartSynchronously() / instance cleanup code in
// Add/RemoveActiveSampler, the else-branch structure around the interval
// ASSERT, and closing braces are not visible here.
class SamplerThread : public Thread {
  static const int kSamplerThreadStackSize = 64 * KB;

  explicit SamplerThread(int interval)
      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
        interval_(interval) {}

  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
  static void TearDown() { delete mutex_; }

  // Registers a sampler and lazily spawns the singleton sampler thread.
  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      instance_ = new SamplerThread(sampler->interval());
      ASSERT(instance_->interval_ == sampler->interval());

  // Unregisters a sampler; when the last one goes away the singleton thread
  // is shut down.
  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
  // Implement Thread::Run().
  // Main sampling loop: while any sampler is registered, optionally throttle,
  // take CPU profile ticks and runtime-profiler ticks, then sleep interval_.
  // NOTE(review): lines are missing from this listing — the 'virtual void
  // Run() {' header, the bodies of the inner failure branches (presumably
  // 'return;'), and closing braces are not visible here.
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      if (cpu_profiling_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
      if (runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
      OS::Sleep(interval_);
  // Per-sampler callback: takes one CPU-profile tick for an initialized,
  // actively profiling isolate.
  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
    if (!sampler->isolate()->IsInitialized()) return;
    if (!sampler->IsProfiling()) return;
    SamplerThread* sampler_thread =
        reinterpret_cast<SamplerThread*>(raw_sampler_thread);
    sampler_thread->SampleContext(sampler);

  // Per-sampler callback: forwards a tick to the runtime profiler.
  // NOTE(review): closing braces of both callbacks are not visible in this
  // listing.
  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  // Suspends the profiled thread, reads its register state via
  // thread_get_state, records pc/sp/fp into a TickSample, walks the stack,
  // delivers the tick, and resumes the thread. The thread stays suspended
  // only for the duration of the state capture.
  // NOTE(review): lines are missing from this listing — the '#if
  // V8_HOST_ARCH_X64' / '#if __DARWIN_UNIX03' / '#else' preprocessor lines
  // that pair with the visible #endif/#elif/#error lines, the 'flavor'
  // argument of thread_get_state, and several closing braces are not
  // visible here.
  void SampleContext(Sampler* sampler) {
    thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
    TickSample sample_obj;
    TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
    if (sample == NULL) sample = &sample_obj;

    // If the thread cannot be suspended, skip this tick entirely.
    if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;

    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    x86_thread_state64_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#define REGISTER_FIELD(name) __r ## name
#define REGISTER_FIELD(name) r ## name
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
    thread_state_flavor_t flavor = i386_THREAD_STATE;
    i386_thread_state_t state;
    mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#define REGISTER_FIELD(name) __e ## name
#define REGISTER_FIELD(name) e ## name
#endif  // __DARWIN_UNIX03
#error Unsupported Mac OS X host architecture.
#endif  // V8_HOST_ARCH

    if (thread_get_state(profiled_thread,
                         reinterpret_cast<natural_t*>(&state),
                         &count) == KERN_SUCCESS) {
      sample->state = sampler->isolate()->current_vm_state();
      sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
      sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
      sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
      sampler->SampleStack(sample);
      sampler->Tick(sample);
    thread_resume(profiled_thread);
  // Throttles sampling when only the runtime profiler is active.
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static Mutex* mutex_;
  static SamplerThread* instance_;

  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
// NOTE(review): the class's closing '};' is not visible in this listing.
#undef REGISTER_FIELD


// Definitions of SamplerThread's static members.
Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
// NOTE(review): the enclosing function header (presumably 'void OS::SetUp()
// {') is not visible in this listing; the statements below initialize
// process-wide state: PRNG seed, the allocated-space limit mutex, and the
// sampler thread machinery.
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
  SamplerThread::SetUp();
881 void OS::TearDown() {
882 SamplerThread::TearDown();
// Creates a sampler for |isolate| ticking every |interval| ms.
// NOTE(review): the constructor's initializer list, the destructor body, and
// closing braces are not visible in this listing.
Sampler::Sampler(Isolate* isolate, int interval)
  data_ = new PlatformData;


Sampler::~Sampler() {
// Registers this sampler with the process-wide sampler thread.
// NOTE(review): surrounding assertions/state updates and closing braces of
// both methods are not visible in this listing.
void Sampler::Start() {
  SamplerThread::AddActiveSampler(this);


// Unregisters this sampler from the process-wide sampler thread.
void Sampler::Stop() {
  SamplerThread::RemoveActiveSampler(this);
917 } } // namespace v8::internal