#include "cpu-profiler.h"
#include <new>
-#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
}
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
+// Returns a pointer into the single-slot ticks_buffer_ for the sampler to
+// fill in, or NULL when the previous sample has not yet been consumed
+// (buffer non-empty) or a Start without a matching Finish is in flight.
+TickSample* ProfilerEventsProcessor::StartTickSampleEvent() {
+  if (!ticks_buffer_is_empty_ || ticks_buffer_is_initialized_) return NULL;
+  ticks_buffer_is_initialized_ = true;
  generator_->Tick();
-  TickSampleEventRecord* evt =
-      new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
-  return &evt->sample;
+  // Stamp the record with the current enqueue order so ProcessTicks can
+  // interleave it correctly with the code-event stream.
+  ticks_buffer_ = TickSampleEventRecord(enqueue_order_);
+  return &ticks_buffer_.sample;
+}
+
+
+// Publishes the sample written after a successful StartTickSampleEvent().
+// Must only be called when Start returned a non-NULL pointer.
+void ProfilerEventsProcessor::FinishTickSampleEvent() {
+  ASSERT(ticks_buffer_is_initialized_ && ticks_buffer_is_empty_);
+  ticks_buffer_is_empty_ = false;
}
namespace v8 {
namespace internal {
-static const int kEventsBufferSize = 256 * KB;
-static const int kTickSamplesBufferChunkSize = 64 * KB;
-static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+// |sampler| may be NULL (tests pass NULL) and is borrowed, not owned;
+// |period_in_useconds| paces the sample/process loop in Run().
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
+                                                 Sampler* sampler,
+                                                 int period_in_useconds)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
+      sampler_(sampler),
      running_(true),
-      ticks_buffer_(sizeof(TickSampleEventRecord),
-                    kTickSamplesBufferChunkSize,
-                    kTickSamplesBufferChunksCount),
+      period_in_useconds_(period_in_useconds),
+      // The single-slot tick buffer starts empty and uninitialized; the
+      // Start/Finish pair manages these two flags.
+      ticks_buffer_is_empty_(true),
+      ticks_buffer_is_initialized_(false),
      enqueue_order_(0) {
}
generator_->RecordTickSample(record.sample);
}
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order == dequeue_order) {
+ if (ticks_buffer_is_empty_) return !ticks_from_vm_buffer_.IsEmpty();
+ if (ticks_buffer_.order == dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
- generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
+ if (ticks_buffer_.sample.frames_count < 0
+ || ticks_buffer_.sample.frames_count > TickSample::kMaxFramesCount) {
+ ticks_buffer_.sample.frames_count = 0;
+ }
+ generator_->RecordTickSample(ticks_buffer_.sample);
+ ticks_buffer_is_empty_ = true;
+ ticks_buffer_is_initialized_ = false;
} else {
return true;
}
}
+// Drains tick and code events until |stop_time| (OS::Ticks() units).
+// NOTE(review): when both queues are empty this loop spins on OS::Ticks()
+// without yielding — presumably intentional to keep sampling latency low
+// within one period; confirm CPU usage is acceptable.
+void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time,
+                                                 unsigned* dequeue_order) {
+  while (OS::Ticks() < stop_time) {
+    if (ProcessTicks(*dequeue_order)) {
+      // All ticks of the current dequeue_order are processed,
+      // proceed to the next code event.
+      ProcessCodeEvent(dequeue_order);
+    }
+  }
+}
+
+
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;
  while (running_) {
-    // Process ticks until we have any.
-    if (ProcessTicks(dequeue_order)) {
-      // All ticks of the current dequeue_order are processed,
-      // proceed to the next code event.
-      ProcessCodeEvent(&dequeue_order);
+    // One sampling period: trigger a sample (if a sampler is attached),
+    // then drain events until the period elapses.
+    int64_t stop_time = OS::Ticks() + period_in_useconds_;
+    if (sampler_ != NULL) {
+      sampler_->DoSample();
    }
-    YieldCPU();
+    ProcessEventsQueue(stop_time, &dequeue_order);
  }
+  // Drain whatever is left after Stop() was requested.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
}
-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
+// Static entry point used by the stack sampler (thread or signal handler).
+// Returns NULL when profiling is off or the processor's buffer is busy.
+TickSample* CpuProfiler::StartTickSampleEvent(Isolate* isolate) {
  if (CpuProfiler::is_profiling(isolate)) {
-    return isolate->cpu_profiler()->processor_->TickSampleEvent();
+    return isolate->cpu_profiler()->processor_->StartTickSampleEvent();
  } else {
    return NULL;
  }
}
+// Companion to StartTickSampleEvent(); callers invoke it unconditionally,
+// and the profiling check here makes that safe when profiling is off.
+void CpuProfiler::FinishTickSampleEvent(Isolate* isolate) {
+  if (CpuProfiler::is_profiling(isolate)) {
+    isolate->cpu_profiler()->processor_->FinishTickSampleEvent();
+  }
+}
+
+
void CpuProfiler::DeleteAllProfiles() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->cpu_profiler() != NULL);
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
+ Sampler* sampler = isolate->logger()->sampler();
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
+ processor_ = new ProfilerEventsProcessor(generator_,
+ sampler,
+ FLAG_cpu_profiler_sampling_period);
NoBarrier_Store(&is_profiling_, true);
- processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
+ processor_->Start();
}
}
void CpuProfiler::StopProcessor() {
+ NoBarrier_Store(&is_profiling_, false);
+ processor_->Stop();
+ processor_->Join();
Logger* logger = Isolate::Current()->logger();
- Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
+ Sampler* sampler = logger->sampler();
sampler->DecreaseProfilingDepth();
+ sampler->SetHasProcessingThread(false);
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
- NoBarrier_Store(&is_profiling_, false);
- processor_->Stop();
- processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ explicit ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
- // Tick sample events are filled directly in the buffer of the circular
- // queue (because the structure is of fixed width, but usually not all
- // stack frame entries are filled.) This method returns a pointer to the
- // next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
+ // StartTickSampleEvent returns a pointer only if the ticks_buffer_ is empty,
+ // FinishTickSampleEvent marks the ticks_buffer_ as filled.
+ // Finish should be called only after successful Start (returning non-NULL
+ // pointer).
+ INLINE(TickSample* StartTickSampleEvent());
+ INLINE(void FinishTickSampleEvent());
private:
union CodeEventsContainer {
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
+ void ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
+ TickSampleEventRecord ticks_buffer_;
+ bool ticks_buffer_is_empty_;
+ bool ticks_buffer_is_initialized_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
- static TickSample* TickSampleEvent(Isolate* isolate);
+ // Finish should be called only after successful Start (returning non-NULL
+ // pointer).
+ static TickSample* StartTickSampleEvent(Isolate* isolate);
+ static void FinishTickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_period, 1000,
+ "CPU profiler sampling period in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
memset(&context, 0, sizeof(context));
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+ CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
+
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
+// Process-wide, refcounted installer for the SIGPROF handler shared by all
+// CPU-profiler samplers.  SetUp()/TearDown() must be externally ordered;
+// the refcount itself is guarded by |mutex_|.
+class CpuProfilerSignalHandler {
+ public:
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() {
+    delete mutex_;
+    // Reset the pointer so a later SetUp() recreates the mutex instead of
+    // the other methods locking a dangling pointer.
+    mutex_ = NULL;
+  }
+
+  // Installs the SIGPROF handler on the first call; later calls only bump
+  // the refcount.  If sigaction() fails the counter stays at zero.
+  static void InstallSignalHandler() {
+    ScopedLock lock(mutex_);
+    if (signal_handler_installed_counter_ > 0) {
+      signal_handler_installed_counter_++;
+      return;
+    }
+    struct sigaction sa;
+    memset(&sa, 0, sizeof(sa));  // Don't pass uninitialized fields to sigaction.
+    sa.sa_sigaction = ProfilerSignalHandler;
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = SA_RESTART | SA_SIGINFO;
+    if (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0) {
+      signal_handler_installed_counter_++;
+    }
+  }
+
+  // Drops one reference; restores the previous handler when the last
+  // reference goes away.  Extra calls with a zero counter are ignored.
+  static void RestoreSignalHandler() {
+    ScopedLock lock(mutex_);
+    if (signal_handler_installed_counter_ == 0)
+      return;
+    if (signal_handler_installed_counter_ == 1) {
+      sigaction(SIGPROF, &old_signal_handler_, 0);
+    }
+    signal_handler_installed_counter_--;
+  }
+
+  static bool signal_handler_installed() {
+    return signal_handler_installed_counter_ > 0;
+  }
+
+ private:
+  static int signal_handler_installed_counter_;
+  static struct sigaction old_signal_handler_;
+  static Mutex* mutex_;
+};
+
+
+int CpuProfilerSignalHandler::signal_handler_installed_counter_ = 0;
+struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
+Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
+
+
class Sampler::PlatformData : public Malloced {
 public:
-  PlatformData() : vm_tid_(GetThreadID()) {}
+  // Capture the thread-group (process) id at construction; tgkill needs it.
+  PlatformData()
+      : vm_tgid_(getpid()),
+        vm_tid_(GetThreadID()) {}
-  int vm_tid() const { return vm_tid_; }
+  // Sends SIGPROF to the sampled thread.  No-op while the profiler signal
+  // handler is not installed, so a stray signal cannot hit the default
+  // disposition (which would terminate the process).
+  void SendProfilingSignal() {
+    if (!CpuProfilerSignalHandler::signal_handler_installed()) return;
+    // Glibc doesn't provide a wrapper for tgkill(2).
+#if defined(ANDROID)
+    syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+#else
+    syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+#endif
+  }
 private:
+  const int vm_tgid_;
  const int vm_tid_;
};
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
- RestoreSignalHandler();
}
}
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
return;
}
Sleep(HALF_INTERVAL);
Sleep(HALF_INTERVAL);
} else {
if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
return;
}
}
}
}
-  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+  // Signals one sampler; the sender context is no longer needed because
+  // PlatformData now carries the tgid/tid itself.
+  static void DoCpuProfile(Sampler* sampler, void*) {
    if (!sampler->IsProfiling()) return;
-    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
-    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+    sampler->platform_data()->SendProfilingSignal();
  }
+  // Forwards a runtime-profiler tick for one sampler's isolate.
  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
-#endif
- }
-
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
#endif // ANDROID
}
- const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
}
#endif
SignalSender::SetUp();
+ CpuProfilerSignalHandler::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
+ CpuProfilerSignalHandler::TearDown();
delete limit_mutex;
}
interval_(interval),
profiling_(false),
active_(false),
+ has_processing_thread_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
}
+// Linux implementation: deliver SIGPROF to the sampled thread; the actual
+// stack capture happens in ProfilerSignalHandler.
+void Sampler::DoSample() {
+  platform_data()->SendProfilingSignal();
+}
+
+
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
}
+// Installs the shared SIGPROF handler (refcounted) before DoSample() is used.
+void Sampler::StartSampling() {
+  CpuProfilerSignalHandler::InstallSignalHandler();
+}
+
+
+// Drops one reference to the shared SIGPROF handler.
+void Sampler::StopSampling() {
+  CpuProfilerSignalHandler::RestoreSignalHandler();
+}
+
+
} } // namespace v8::internal
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+ CpuProfiler::FinishTickSampleEvent(sampler->isolate());
thread_resume(profiled_thread);
}
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
#endif // __NetBSD__
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
sampler->SampleStack(sample);
sampler->Tick(sample);
+ CpuProfiler::FinishTickSampleEvent(isolate);
}
class Sampler::PlatformData : public Malloced {
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SetActive(false);
}
+
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
memset(&context, 0, sizeof(context));
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+ CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
}
+void Sampler::DoSample() {
+ // TODO(rogulenko): implement
+}
+
+
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
}
+void Sampler::StartSampling() {
+}
+
+
+void Sampler::StopSampling() {
+}
+
+
} } // namespace v8::internal
IncSamplesTaken();
}
+ // Performs platform-specific stack sampling.
+ void DoSample();
+
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
void Start();
void Stop();
-  // Is the sampler used for profiling?
-  bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
-  void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
-  void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+  // Returns true when the external sampling thread should sample this
+  // Sampler: profiling is requested and no dedicated processing thread
+  // drives sampling via DoSample() instead.
+  bool IsProfiling() const {
+    return NoBarrier_Load(&profiling_) > 0 &&
+           !NoBarrier_Load(&has_processing_thread_);
+  }
+  // Perform platform-specific initialization before DoSample() may be invoked.
+  void StartSampling();
+  // Perform platform-specific cleanup after sampling.
+  void StopSampling();
+  // First increment (0 -> 1) triggers platform sampling setup.
+  void IncreaseProfilingDepth() {
+    if (NoBarrier_AtomicIncrement(&profiling_, 1) == 1) {
+      StartSampling();
+    }
+  }
+  // Last decrement (1 -> 0) tears platform sampling down.
+  void DecreaseProfilingDepth() {
+    if (!NoBarrier_AtomicIncrement(&profiling_, -1)) {
+      StopSampling();
+    }
+  }
+  // Set by CpuProfiler when a ProfilerEventsProcessor thread owns sampling.
+  void SetHasProcessingThread(bool value) {
+    NoBarrier_Store(&has_processing_thread_, value);
+  }
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
+ Atomic32 has_processing_thread_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
+#include "platform.h"
#include "../include/v8-profiler.h"
using i::CodeEntry;
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
processor.Stop();
processor.Join();
return reinterpret_cast<i::Address>(n);
}
-static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
-                                   i::Address frame1,
-                                   i::Address frame2 = NULL,
-                                   i::Address frame3 = NULL) {
-  i::TickSample* sample = proc->TickSampleEvent();
+// Fills one tick sample with up to three fake frames, polling until the
+// processor's single-slot buffer becomes available.
+// NOTE(review): the unconditional leading Sleep(20) costs 20ms per call even
+// when the buffer is already free — presumably to let the processor thread
+// make progress first; confirm it is needed.
+static void AddTickSampleEvent(ProfilerEventsProcessor* processor,
+                               i::Address frame1,
+                               i::Address frame2 = NULL,
+                               i::Address frame3 = NULL) {
+  i::TickSample* sample;
+  i::OS::Sleep(20);
+  while ((sample = processor->StartTickSampleEvent()) == NULL) i::OS::Sleep(20);
  sample->pc = frame1;
  sample->tos = frame1;
  sample->frames_count = 0;
  sample->stack[1] = frame3;
  sample->frames_count = 2;
  }
+  processor->FinishTickSampleEvent();
}
namespace {
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
// Enqueue code creation events.
processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeCreateEvent(i::Logger::STUB_TAG, 4, ToAddress(0x1605), 0x10);
- // Enqueue a tick event to enable code events processing.
- EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
+ // Add a tick event to enable code events processing.
+ AddTickSampleEvent(&processor, ToAddress(0x1000));
processor.Stop();
processor.Join();
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"ddd",
ToAddress(0x1400),
0x80);
- EnqueueTickSampleEvent(&processor, ToAddress(0x1210));
- EnqueueTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
- EnqueueTickSampleEvent(&processor,
- ToAddress(0x1404),
- ToAddress(0x1305),
- ToAddress(0x1230));
+ AddTickSampleEvent(&processor, ToAddress(0x1210));
+ AddTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
+ AddTickSampleEvent(&processor,
+ ToAddress(0x1404),
+ ToAddress(0x1305),
+ ToAddress(0x1230));
processor.Stop();
processor.Join();
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
ToAddress(0x1200),
0x80);
- i::TickSample* sample = processor.TickSampleEvent();
+ i::TickSample* sample = processor.StartTickSampleEvent();
sample->pc = ToAddress(0x1200);
sample->tos = 0;
sample->frames_count = i::TickSample::kMaxFramesCount;
for (int i = 0; i < sample->frames_count; ++i) {
sample->stack[i] = ToAddress(0x1200);
}
+ processor.FinishTickSampleEvent();
processor.Stop();
processor.Join();