1 // Copyright 2014 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "gin/public/v8_platform.h"

#include <memory>

#include "base/bit_cast.h"
#include "base/check_op.h"
#include "base/debug/stack_trace.h"
#include "base/location.h"
#include "base/memory/nonscannable_memory.h"
#include "base/memory/raw_ptr.h"
#include "base/rand_util.h"
#include "base/system/sys_info.h"
#include "base/task/post_job.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/trace_event/trace_event.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "gin/per_isolate_data.h"
#include "v8_platform_page_allocator.h"
// Process-wide V8Platform singleton. Leaky: never destroyed, so it is safe to
// use during shutdown.
base::LazyInstance<V8Platform>::Leaky g_v8_platform = LAZY_INSTANCE_INITIALIZER;

// Thread-pool traits used below to map v8::TaskPriority levels onto
// base::TaskPriority when posting worker tasks and jobs.
constexpr base::TaskTraits kLowPriorityTaskTraits = {
    base::TaskPriority::BEST_EFFORT};

constexpr base::TaskTraits kDefaultTaskTraits = {
    base::TaskPriority::USER_VISIBLE};

constexpr base::TaskTraits kBlockingTaskTraits = {
    base::TaskPriority::USER_BLOCKING};
// Captures the current thread's stack; handed to V8 as a plain function
// pointer via V8Platform::GetStackTracePrinter().
void PrintStackTrace() {
  base::debug::StackTrace trace;
  // NOTE(review): this extract ends the function here; the statement that
  // actually prints |trace| (and the closing brace) appears to be missing.
// Adapts a v8::ConvertableToTraceFormat so it can be handed to Chrome's
// trace-event machinery, which expects base::trace_event types.
// NOTE(review): access specifiers, a trailing "delete;", and closing braces
// appear to be missing from this extract; code lines are left untouched.
class ConvertableToTraceFormatWrapper final
    : public base::trace_event::ConvertableToTraceFormat {
  // Takes ownership of the wrapped V8-side convertible.
  explicit ConvertableToTraceFormatWrapper(
      std::unique_ptr<v8::ConvertableToTraceFormat> inner)
      : inner_(std::move(inner)) {}
  ConvertableToTraceFormatWrapper(const ConvertableToTraceFormatWrapper&) =
  ConvertableToTraceFormatWrapper& operator=(
      const ConvertableToTraceFormatWrapper&) = delete;
  ~ConvertableToTraceFormatWrapper() override = default;
  // Delegates serialization straight to the wrapped V8 object.
  void AppendAsTraceFormat(std::string* out) const final {
    inner_->AppendAsTraceFormat(out);

  // Owning pointer to the V8-side convertible.
  std::unique_ptr<v8::ConvertableToTraceFormat> inner_;
// Bridges Chrome's TraceLog enabled/disabled notifications to the
// v8::TracingController::TraceStateObserver interface, fanning each state
// change out to all registered V8 observers.
// NOTE(review): access specifiers, loop bodies, the mutex declaration, and
// closing braces appear to be missing from this extract.
class EnabledStateObserverImpl final
    : public base::trace_event::TraceLog::EnabledStateObserver {
  EnabledStateObserverImpl() {
    // Register for TraceLog state changes for the lifetime of this object.
    base::trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);

  EnabledStateObserverImpl(const EnabledStateObserverImpl&) = delete;

  EnabledStateObserverImpl& operator=(const EnabledStateObserverImpl&) = delete;

  ~EnabledStateObserverImpl() override {
    base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(

  // Fans the TraceLog "enabled" signal out to every registered V8 observer.
  void OnTraceLogEnabled() final {
    base::AutoLock lock(mutex_);
    for (auto* o : observers_) {

  // Fans the TraceLog "disabled" signal out to every registered V8 observer.
  void OnTraceLogDisabled() final {
    base::AutoLock lock(mutex_);
    for (auto* o : observers_) {

  void AddObserver(v8::TracingController::TraceStateObserver* observer) {
    base::AutoLock lock(mutex_);
    DCHECK(!observers_.count(observer));
    observers_.insert(observer);

    // Fire the observer if recording is already in progress.
    if (base::trace_event::TraceLog::GetInstance()->IsEnabled())
      observer->OnTraceEnabled();

  void RemoveObserver(v8::TracingController::TraceStateObserver* observer) {
    base::AutoLock lock(mutex_);
    DCHECK(observers_.count(observer) == 1);
    observers_.erase(observer);

  // Guarded by |mutex_| (its declaration is in the elided portion above).
  std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
// Single process-wide dispatcher shared by all TracingControllerImpl
// instances; leaky to avoid destructor-order issues at shutdown.
base::LazyInstance<EnabledStateObserverImpl>::Leaky g_trace_state_dispatcher =
    LAZY_INSTANCE_INITIALIZER;
// TODO(skyostil): Deduplicate this with the clamper in Blink.
// Clamps timestamps to a coarse grid and jitters the grid transition with a
// per-process secret, so callers cannot observe a higher clock resolution
// than intended.
// NOTE(review): the class header, the "#else"/"#endif" separating the two
// kResolutionSeconds definitions, some statements, and closing braces appear
// to be missing from this extract; code lines are left untouched.
// As site isolation is enabled on desktop platforms, we can safely provide
// more timing resolution. Jittering is still enabled everywhere.
#if BUILDFLAG(IS_ANDROID)
static constexpr double kResolutionSeconds = 100e-6;
static constexpr double kResolutionSeconds = 5e-6;

// Seeds the jitter with a per-process random secret.
TimeClamper() : secret_(base::RandUint64()) {}
TimeClamper(const TimeClamper&) = delete;
TimeClamper& operator=(const TimeClamper&) = delete;

// Rounds |time_seconds| down to the kResolutionSeconds grid, then bumps it to
// the next grid point when it lies past a secret per-interval threshold.
double ClampTimeResolution(double time_seconds) const {
  bool was_negative = false;
  if (time_seconds < 0) {
    time_seconds = -time_seconds;
  // For each clamped time interval, compute a pseudorandom transition
  // threshold. The reported time will either be the start of that interval or
  // the next one depending on which side of the threshold |time_seconds| is.
  double interval = floor(time_seconds / kResolutionSeconds);
  double clamped_time = interval * kResolutionSeconds;
  double tick_threshold = ThresholdFor(clamped_time);

  if (time_seconds >= tick_threshold)
    clamped_time = (interval + 1) * kResolutionSeconds;
  clamped_time = -clamped_time;

// Deterministic pseudorandom threshold within
// [clamped_time, clamped_time + kResolutionSeconds), keyed on |secret_|.
inline double ThresholdFor(double clamped_time) const {
  MurmurHash3(base::bit_cast<int64_t>(clamped_time) ^ secret_);
  return clamped_time + kResolutionSeconds * ToDouble(time_hash);

// Maps a 64-bit hash onto [0, 1): builds a double in [1.0, 2.0) from the
// mantissa bits, then subtracts 1.
static inline double ToDouble(uint64_t value) {
  // Exponent for double values for [1.0 .. 2.0]
  static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
  static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
  uint64_t random = (value & kMantissaMask) | kExponentBits;
  return base::bit_cast<double>(random) - 1;

// MurmurHash3 64-bit finalizer; mixes all input bits into the output.
static inline uint64_t MurmurHash3(uint64_t value) {
  value ^= value >> 33;
  value *= uint64_t{0xFF51AFD7ED558CCD};
  value ^= value >> 33;
  value *= uint64_t{0xC4CEB9FE1A85EC53};
  value ^= value >> 33;

// Per-process random key for the jitter threshold.
const uint64_t secret_;
// Shared clamper instance; leaky to avoid shutdown destructor-order issues.
base::LazyInstance<TimeClamper>::Leaky g_time_clamper =
    LAZY_INSTANCE_INITIALIZER;

#if BUILDFLAG(USE_PARTITION_ALLOC)
// Page allocator handed to V8 when PartitionAlloc is in use.
base::LazyInstance<gin::PageAllocator>::Leaky g_page_allocator =
    LAZY_INSTANCE_INITIALIZER;
#endif  // BUILDFLAG(USE_PARTITION_ALLOC)
// Adapts a base::JobDelegate to the v8::JobDelegate interface so job tasks
// written against V8's API can query the scheduler's yielding/concurrency
// state.
// NOTE(review): access specifiers and closing braces appear to be missing
// from this extract; code lines are left untouched.
class JobDelegateImpl : public v8::JobDelegate {
  explicit JobDelegateImpl(base::JobDelegate* delegate) : delegate_(delegate) {}
  // NOTE(review): the default constructor leaves |delegate_| null while every
  // method below dereferences it unconditionally — confirm this constructor
  // is actually needed by callers.
  JobDelegateImpl() = default;

  JobDelegateImpl(const JobDelegateImpl&) = delete;
  JobDelegateImpl& operator=(const JobDelegateImpl&) = delete;

  // Each call forwards directly to the wrapped base::JobDelegate.
  bool ShouldYield() override { return delegate_->ShouldYield(); }
  void NotifyConcurrencyIncrease() override {
    delegate_->NotifyConcurrencyIncrease();
  uint8_t GetTaskId() override { return delegate_->GetTaskId(); }
  bool IsJoiningThread() const override { return delegate_->IsJoiningThread(); }

  // Non-owning; supplied by the scheduler for the duration of the task run.
  raw_ptr<base::JobDelegate> delegate_;
// Wraps a base::JobHandle behind the v8::JobHandle interface so V8 can
// control jobs created through V8Platform::CreateJob().
// NOTE(review): access specifiers, the switch header in ToBaseTaskPriority(),
// and closing braces appear to be missing from this extract.
class JobHandleImpl : public v8::JobHandle {
  explicit JobHandleImpl(base::JobHandle handle) : handle_(std::move(handle)) {}
  ~JobHandleImpl() override = default;

  JobHandleImpl(const JobHandleImpl&) = delete;
  JobHandleImpl& operator=(const JobHandleImpl&) = delete;

  // Each call forwards to the wrapped base::JobHandle.
  void NotifyConcurrencyIncrease() override {
    handle_.NotifyConcurrencyIncrease();
  // Priority updates are supported; V8 checks this before UpdatePriority().
  bool UpdatePriorityEnabled() const override { return true; }
  void UpdatePriority(v8::TaskPriority new_priority) override {
    handle_.UpdatePriority(ToBaseTaskPriority(new_priority));
  void Join() override { handle_.Join(); }
  void Cancel() override { handle_.Cancel(); }
  void CancelAndDetach() override { handle_.CancelAndDetach(); }
  bool IsActive() override { return handle_.IsActive(); }
  // Reports whether the underlying base handle is still non-null.
  bool IsValid() override { return !!handle_; }

  // Maps V8 job priorities onto the equivalent base::TaskPriority.
  static base::TaskPriority ToBaseTaskPriority(v8::TaskPriority priority) {
    case v8::TaskPriority::kBestEffort:
      return base::TaskPriority::BEST_EFFORT;
    case v8::TaskPriority::kUserVisible:
      return base::TaskPriority::USER_VISIBLE;
    case v8::TaskPriority::kUserBlocking:
      return base::TaskPriority::USER_BLOCKING;

  base::JobHandle handle_;
namespace trace_event {

// Allow std::unique_ptr<v8::ConvertableToTraceFormat> to be a valid
// initialization value for trace macros.
// NOTE(review): the template<> header and surrounding lines appear to be
// missing from this extract; code lines are left untouched.
struct base::trace_event::TraceValue::Helper<
    std::unique_ptr<v8::ConvertableToTraceFormat>> {
  static constexpr unsigned char kType = TRACE_VALUE_TYPE_CONVERTABLE;
  static inline void SetValue(
      std::unique_ptr<v8::ConvertableToTraceFormat> value) {
    // NOTE: |as_convertable| is an owning pointer, so using new here
    // (with ownership transferred to the TraceValue) is intentional.
    new gin::ConvertableToTraceFormatWrapper(std::move(value));

}  // namespace trace_event
// v8::TracingController implementation that forwards V8's trace events into
// Chrome's trace-event macros (legacy path, compiled out when the Perfetto
// client library is in use).
// NOTE(review): several parameter lines, access specifiers, and closing
// braces appear to be missing from this extract; code lines are untouched.
class V8Platform::TracingControllerImpl : public v8::TracingController {
  TracingControllerImpl() = default;
  TracingControllerImpl(const TracingControllerImpl&) = delete;
  TracingControllerImpl& operator=(const TracingControllerImpl&) = delete;
  ~TracingControllerImpl() override = default;

  // TracingController implementation.
#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  const uint8_t* GetCategoryGroupEnabled(const char* name) override {
    return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(name);

  // Converts V8's argument arrays into base::trace_event::TraceArguments and
  // forwards the event; returns Chrome's event handle packed into a uint64_t.
  uint64_t AddTraceEvent(
      const uint8_t* category_enabled_flag,
      const char** arg_names,
      const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) override {
    base::trace_event::TraceArguments args(
        num_args, arg_names, arg_types,
        reinterpret_cast<const unsigned long long*>(arg_values),
    // Trace macros support at most two arguments per event.
    DCHECK_LE(num_args, 2);
    base::trace_event::TraceEventHandle handle =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID(
            phase, category_enabled_flag, name, scope, id, bind_id, &args,
    // Pack the TraceEventHandle bit-for-bit into the uint64_t V8 expects.
    memcpy(&result, &handle, sizeof(result));

  // Same as AddTraceEvent(), but stamps the event with an explicit timestamp
  // supplied by V8 in microseconds.
  uint64_t AddTraceEventWithTimestamp(
      const uint8_t* category_enabled_flag,
      const char** arg_names,
      const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
      int64_t timestampMicroseconds) override {
    base::trace_event::TraceArguments args(
        num_args, arg_names, arg_types,
        reinterpret_cast<const unsigned long long*>(arg_values),
    DCHECK_LE(num_args, 2);
    // Rebuild a TimeTicks from the microsecond offset V8 provided.
    base::TimeTicks timestamp =
        base::TimeTicks() + base::Microseconds(timestampMicroseconds);
    base::trace_event::TraceEventHandle handle =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            phase, category_enabled_flag, name, scope, id, bind_id,
            TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, &args, flags);
    memcpy(&result, &handle, sizeof(result));

  // Unpacks the uint64_t back into a TraceEventHandle and closes the duration
  // of the corresponding event.
  void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                uint64_t handle) override {
    base::trace_event::TraceEventHandle traceEventHandle;
    memcpy(&traceEventHandle, &handle, sizeof(handle));
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_enabled_flag, name,
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

  // Observer registration is shared process-wide via g_trace_state_dispatcher.
  void AddTraceStateObserver(TraceStateObserver* observer) override {
    g_trace_state_dispatcher.Get().AddObserver(observer);
  void RemoveTraceStateObserver(TraceStateObserver* observer) override {
    g_trace_state_dispatcher.Get().RemoveObserver(observer);
365 V8Platform* V8Platform::Get() { return g_v8_platform.Pointer(); }
367 V8Platform::V8Platform() : tracing_controller_(new TracingControllerImpl) {}
// Defaulted out of line, where TracingControllerImpl is a complete type.
V8Platform::~V8Platform() = default;
#if BUILDFLAG(USE_PARTITION_ALLOC)
// Exposes the process-wide PartitionAlloc-backed page allocator to V8.
// NOTE(review): closing braces and the tail of GetZoneBackingAllocator()
// appear to be missing from this extract; code lines are left untouched.
PageAllocator* V8Platform::GetPageAllocator() {
  return g_page_allocator.Pointer();

void V8Platform::OnCriticalMemoryPressure() {
  // We only have a reservation on 32-bit Windows systems.
  // TODO(bbudge) Make the #if's in BlinkInitializer match.
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_32_BITS)
  partition_alloc::ReleaseReservation();

// Supplies V8's zone allocator with non-quarantinable malloc/free,
// presumably so zone memory stays out of the quarantine — confirm.
v8::ZoneBackingAllocator* V8Platform::GetZoneBackingAllocator() {
  static struct Allocator final : v8::ZoneBackingAllocator {
    MallocFn GetMallocFn() const override {
      return &base::AllocNonQuarantinable;
    FreeFn GetFreeFn() const override { return &base::FreeNonQuarantinable; }
#endif  // BUILDFLAG(USE_PARTITION_ALLOC)
395 std::shared_ptr<v8::TaskRunner> V8Platform::GetForegroundTaskRunner(
396 v8::Isolate* isolate) {
397 PerIsolateData* data = PerIsolateData::From(isolate);
398 return data->task_runner();
// Reports how many worker threads V8 may use for background work.
int V8Platform::NumberOfWorkerThreads() {
  // V8Platform assumes the scheduler uses the same set of workers for default
  // and user blocking tasks.
  const size_t num_foreground_workers =
      base::ThreadPoolInstance::Get()
          ->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
  // NOTE(review): the argument line of the call above (presumably
  // kDefaultTaskTraits) appears to be missing from this extract.
  DCHECK_EQ(num_foreground_workers,
            base::ThreadPoolInstance::Get()
                ->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
                    kBlockingTaskTraits));
  // Always report at least one worker, even if the pool reports zero.
  return std::max(1, static_cast<int>(num_foreground_workers));
415 void V8Platform::CallOnWorkerThread(std::unique_ptr<v8::Task> task) {
416 base::ThreadPool::PostTask(FROM_HERE, kDefaultTaskTraits,
417 base::BindOnce(&v8::Task::Run, std::move(task)));
420 void V8Platform::CallBlockingTaskOnWorkerThread(
421 std::unique_ptr<v8::Task> task) {
422 base::ThreadPool::PostTask(FROM_HERE, kBlockingTaskTraits,
423 base::BindOnce(&v8::Task::Run, std::move(task)));
426 void V8Platform::CallLowPriorityTaskOnWorkerThread(
427 std::unique_ptr<v8::Task> task) {
428 base::ThreadPool::PostTask(FROM_HERE, kLowPriorityTaskTraits,
429 base::BindOnce(&v8::Task::Run, std::move(task)));
432 void V8Platform::CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
433 double delay_in_seconds) {
434 base::ThreadPool::PostDelayedTask(
435 FROM_HERE, kDefaultTaskTraits,
436 base::BindOnce(&v8::Task::Run, std::move(task)),
437 base::Seconds(delay_in_seconds));
// Creates a base::PostJob-backed job for V8, translating the requested
// v8::TaskPriority into base::TaskTraits and wrapping the scheduler's
// delegate/handle in the V8-facing adapters above.
// NOTE(review): the switch header, break statements, the "auto handle ="
// assignment, BindRepeating calls, and closing braces appear to be missing
// from this extract; code lines are left untouched.
std::unique_ptr<v8::JobHandle> V8Platform::CreateJob(
    v8::TaskPriority priority,
    std::unique_ptr<v8::JobTask> job_task) {
  base::TaskTraits task_traits;
  case v8::TaskPriority::kBestEffort:
    task_traits = kLowPriorityTaskTraits;
  case v8::TaskPriority::kUserVisible:
    task_traits = kDefaultTaskTraits;
  case v8::TaskPriority::kUserBlocking:
    task_traits = kBlockingTaskTraits;
  // Ownership of |job_task| is assumed by |worker_task|, while
  // |max_concurrency_callback| uses an unretained pointer.
  auto* job_task_ptr = job_task.get();
  base::CreateJob(FROM_HERE, task_traits,
                  [](const std::unique_ptr<v8::JobTask>& job_task,
                     base::JobDelegate* delegate) {
                    // Wrap the scheduler's delegate so V8 sees its own API.
                    JobDelegateImpl delegate_impl(delegate);
                    job_task->Run(&delegate_impl);
                  std::move(job_task)),
                  [](v8::JobTask* job_task, size_t worker_count) {
                    return job_task->GetMaxConcurrency(worker_count);
                  base::Unretained(job_task_ptr)));
  return std::make_unique<JobHandleImpl>(std::move(handle));
476 bool V8Platform::IdleTasksEnabled(v8::Isolate* isolate) {
477 return PerIsolateData::From(isolate)->task_runner()->IdleTasksEnabled();
480 double V8Platform::MonotonicallyIncreasingTime() {
481 return base::TimeTicks::Now().ToInternalValue() /
482 static_cast<double>(base::Time::kMicrosecondsPerSecond);
485 double V8Platform::CurrentClockTimeMillis() {
486 double now_seconds = base::Time::Now().ToJsTime() / 1000;
487 return g_time_clamper.Get().ClampTimeResolution(now_seconds) * 1000;
490 v8::TracingController* V8Platform::GetTracingController() {
491 return tracing_controller_.get();
494 v8::Platform::StackTracePrinter V8Platform::GetStackTracePrinter() {
495 return PrintStackTrace;