1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
11 #include "include/v8-debug.h"
12 #include "src/allocation.h"
13 #include "src/assert-scope.h"
14 #include "src/base/atomicops.h"
15 #include "src/builtins.h"
16 #include "src/cancelable-task.h"
17 #include "src/contexts.h"
19 #include "src/execution.h"
20 #include "src/frames.h"
21 #include "src/futex-emulation.h"
22 #include "src/global-handles.h"
23 #include "src/handles.h"
24 #include "src/hashmap.h"
25 #include "src/heap/heap.h"
26 #include "src/messages.h"
27 #include "src/optimizing-compile-dispatcher.h"
28 #include "src/regexp/regexp-stack.h"
29 #include "src/runtime/runtime.h"
30 #include "src/runtime-profiler.h"
// Forward declarations for types referenced (mostly by pointer) in the
// Isolate interface declared later in this header.
class RandomNumberGenerator;
class BasicBlockProfiler;
class CallInterfaceDescriptorData;
class CodeStubDescriptor;
class CompilationCache;
class CompilationStatistics;
class ContextSlotCache;
class DeoptimizerData;
class ExternalCallbackScope;
class ExternalReferenceTable;
class FunctionInfoListener;
class HandleScopeImplementer;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class MaterializedObjectStore;
class CodeAgingHelper;
class ThreadVisitor;  // Defined in v8threads.h
template <StateTag Tag> class VMState;
// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we cannot include
// assembler.h, where it is defined, here.
typedef void* ExternalReferenceRedirectorPointer();
93 namespace interpreter {
// Static indirection table for handles to constants. If a frame element or
// a Result represents a constant, the data contains an index into this
// table of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;
// NOTE(review): several macro bodies below appear truncated in this chunk
// (the enclosing do { ... } while (false) wrapper lines are not visible);
// the code is kept byte-identical — confirm against the full file.
//
// Returns early (with the isolate's failure sentinel) if a scheduled
// exception is present, promoting it to a pending exception first.
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  Isolate* __isolate__ = (isolate); \
  if (__isolate__->has_scheduled_exception()) { \
    return __isolate__->PromoteScheduledException(); \
// Macros for MaybeHandle.
// As above, but returns |value| instead of the promoted exception object.
#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  Isolate* __isolate__ = (isolate); \
  if (__isolate__->has_scheduled_exception()) { \
    __isolate__->PromoteScheduledException(); \
#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
// Assigns |dst| from |call| (a MaybeHandle-returning expression); if the
// call threw, returns |value| from the enclosing function instead.
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  if (!(call).ToHandle(&dst)) { \
    DCHECK((isolate)->has_pending_exception()); \
#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE( \
      isolate, dst, call, isolate->heap()->exception())
// As ASSIGN_RETURN_ON_EXCEPTION_VALUE, propagating an empty MaybeHandle<T>.
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
// Throws a freshly constructed error object; |call| is a factory method
// invocation, e.g. NewTypeError(...).
#define THROW_NEW_ERROR(isolate, call, T) \
  return isolate->Throw<T>(isolate->factory()->call); \
#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
  return isolate->Throw(*isolate->factory()->call); \
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
  if ((call).is_null()) { \
    DCHECK((isolate)->has_pending_exception()); \
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, isolate->heap()->exception())
// As RETURN_ON_EXCEPTION_VALUE, propagating an empty MaybeHandle<T>.
#define RETURN_ON_EXCEPTION(isolate, call, T) \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
// X-macro enumerating the named per-isolate addresses; |C| is invoked as
// C(CamelName, hacker_name) for each entry. Used below to declare the
// AddressId enum (kXxxAddress) and elsewhere to build address tables.
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
  C(Handler, handler) \
  C(CEntryFP, c_entry_fp) \
  C(CFunction, c_function) \
  C(Context, context) \
  C(PendingException, pending_exception) \
  C(PendingHandlerContext, pending_handler_context) \
  C(PendingHandlerCode, pending_handler_code) \
  C(PendingHandlerOffset, pending_handler_offset) \
  C(PendingHandlerFP, pending_handler_fp) \
  C(PendingHandlerSP, pending_handler_sp) \
  C(ExternalCaughtException, external_caught_exception) \
  C(JSEntrySP, js_entry_sp)
182 // Platform-independent, reliable thread identifier.
185 // Creates an invalid ThreadId.
186 ThreadId() { base::NoBarrier_Store(&id_, kInvalidId); }
188 ThreadId& operator=(const ThreadId& other) {
189 base::NoBarrier_Store(&id_, base::NoBarrier_Load(&other.id_));
193 // Returns ThreadId for current thread.
194 static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
196 // Returns invalid ThreadId (guaranteed not to be equal to any thread).
197 static ThreadId Invalid() { return ThreadId(kInvalidId); }
199 // Compares ThreadIds for equality.
200 INLINE(bool Equals(const ThreadId& other) const) {
201 return base::NoBarrier_Load(&id_) == base::NoBarrier_Load(&other.id_);
204 // Checks whether this ThreadId refers to any thread.
205 INLINE(bool IsValid() const) {
206 return base::NoBarrier_Load(&id_) != kInvalidId;
209 // Converts ThreadId to an integer representation
210 // (required for public API: V8::V8::GetCurrentThreadId).
211 int ToInteger() const { return static_cast<int>(base::NoBarrier_Load(&id_)); }
213 // Converts ThreadId to an integer representation
214 // (required for public API: V8::V8::TerminateExecution).
215 static ThreadId FromInteger(int id) { return ThreadId(id); }
218 static const int kInvalidId = -1;
220 explicit ThreadId(int id) { base::NoBarrier_Store(&id_, id); }
222 static int AllocateThreadId();
224 static int GetCurrentThreadId();
228 static base::Atomic32 highest_thread_id_;
230 friend class Isolate;
// Declares an inline setter/getter pair for a member field named |name|_.
#define FIELD_ACCESSOR(type, name) \
  inline void set_##name(type v) { name##_ = v; } \
  inline type name() const { return name##_; }
239 class ThreadLocalTop BASE_EMBEDDED {
241 // Does early low-level initialization that does not depend on the
242 // isolate being present.
245 // Initialize the thread data.
248 // Get the top C++ try catch handler or NULL if none are registered.
250 // This method is not guaranteed to return an address that can be
251 // used for comparison with addresses into the JS stack. If such an
252 // address is needed, use try_catch_handler_address.
253 FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)
255 // Get the address of the top C++ try catch handler or NULL if
256 // none are registered.
258 // This method always returns an address that can be compared to
259 // pointers into the JavaScript stack. When running on actual
260 // hardware, try_catch_handler_address and TryCatchHandler return
261 // the same pointer. When running on a simulator with a separate JS
262 // stack, try_catch_handler_address returns a JS stack address that
263 // corresponds to the place on the JS stack where the C++ handler
264 // would have been if the stack were not separate.
265 Address try_catch_handler_address() {
266 return reinterpret_cast<Address>(
267 v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
273 // The context where the current execution method is created and for variable
277 Object* pending_exception_;
279 // Communication channel between Isolate::FindHandler and the CEntryStub.
280 Context* pending_handler_context_;
281 Code* pending_handler_code_;
282 intptr_t pending_handler_offset_;
283 Address pending_handler_fp_;
284 Address pending_handler_sp_;
286 // Communication channel between Isolate::Throw and message consumers.
287 bool rethrowing_message_;
288 Object* pending_message_obj_;
290 // Use a separate value for scheduled exceptions to preserve the
291 // invariants that hold about pending_exception. We may want to
293 Object* scheduled_exception_;
294 bool external_caught_exception_;
295 SaveContext* save_context_;
298 Address c_entry_fp_; // the frame pointer of the top c entry frame
299 Address handler_; // try-blocks are chained through the stack
300 Address c_function_; // C function that was called at c entry.
302 // Throwing an exception may cause a Promise rejection. For this purpose
303 // we keep track of a stack of nested promises and the corresponding
304 // try-catch handlers.
305 PromiseOnStack* promise_on_stack_;
308 Simulator* simulator_;
311 Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
312 // the external callback we're currently in
313 ExternalCallbackScope* external_callback_scope_;
314 StateTag current_vm_state_;
316 // Call back function to report unsafe JS accesses.
317 v8::FailedAccessCheckCallback failed_access_check_callback_;
320 void InitializeInternal();
322 v8::TryCatch* try_catch_handler_;
// Simulator-only isolate fields (field lists consumed via the V x-macro).
// NOTE(review): the #if/#else guards selecting between this definition and
// the empty one below are not visible in this chunk — presumably keyed on
// USE_SIMULATOR; confirm against the full file.
#define ISOLATE_INIT_SIMULATOR_LIST(V) \
  V(bool, simulator_initialized, false) \
  V(HashMap*, simulator_i_cache, NULL) \
  V(Redirection*, simulator_redirection, NULL)
// Empty variant for builds without the simulator.
#define ISOLATE_INIT_SIMULATOR_LIST(V)
// Debug-only per-isolate arrays.
// NOTE(review): guard (#ifdef DEBUG) not visible in this chunk.
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
  V(CommentStatistic, paged_space_comments_statistics, \
      CommentStatistic::kMaxComments + 1) \
  V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
// Empty variant for non-debug builds.
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
// Per-isolate array fields: V(type, name, length).
#define ISOLATE_INIT_ARRAY_LIST(V) \
  /* SerializerDeserializer state. */ \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize) \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
  V(int, suffix_table, (kBMMaxShift + 1)) \
  V(uint32_t, private_random_seed, 2) \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
// Cache of heap objects used while printing stack traces (debug aid).
typedef List<HeapObject*> DebugObjectCache;
// Master list of scalar per-isolate fields: V(type, name, initial_value).
// Expanded below (via GLOBAL_ACCESSOR) into field accessors, and elsewhere
// into the field declarations themselves.
#define ISOLATE_INIT_LIST(V) \
  /* Assembler state. */ \
  V(FatalErrorCallback, exception_behavior, NULL) \
  V(LogEventCallback, event_logger, NULL) \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the native context. */ \
  V(int, next_serial_number, 0) \
  V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
  /* Part of the state of liveedit. */ \
  V(FunctionInfoListener*, active_function_info_listener, NULL) \
  /* State for Relocatable. */ \
  V(Relocatable*, relocatable_top, NULL) \
  V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
  V(Object*, string_stream_current_security_token, NULL) \
  V(ExternalReferenceTable*, external_reference_table, NULL) \
  V(HashMap*, external_reference_map, NULL) \
  V(HashMap*, root_index_map, NULL) \
  V(int, pending_microtask_count, 0) \
  V(bool, autorun_microtasks, true) \
  V(HStatistics*, hstatistics, NULL) \
  V(CompilationStatistics*, turbo_statistics, NULL) \
  V(HTracer*, htracer, NULL) \
  V(CodeTracer*, code_tracer, NULL) \
  V(bool, fp_stubs_generated, false) \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
  V(PromiseRejectCallback, promise_reject_callback, NULL) \
  V(const v8::StartupData*, snapshot_blob, NULL) \
  ISOLATE_INIT_SIMULATOR_LIST(V)
// Declares a setter/getter pair forwarding to the matching field of the
// isolate's thread_local_top_.
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
  inline type name() const { return thread_local_top_.name##_; }
// Declares an accessor returning the address of a thread_local_top_ field.
#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
401 // These forward declarations are required to make the friend declarations in
402 // PerIsolateThreadData work on some older versions of gcc.
403 class ThreadDataTable;
404 class EntryStackItem;
408 // A thread has a PerIsolateThreadData instance for each isolate that it has
409 // entered. That instance is allocated when the isolate is initially entered
410 // and reused on subsequent entries.
411 class PerIsolateThreadData {
413 PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
415 thread_id_(thread_id),
423 ~PerIsolateThreadData();
424 Isolate* isolate() const { return isolate_; }
425 ThreadId thread_id() const { return thread_id_; }
427 FIELD_ACCESSOR(uintptr_t, stack_limit)
428 FIELD_ACCESSOR(ThreadState*, thread_state)
431 FIELD_ACCESSOR(Simulator*, simulator)
434 bool Matches(Isolate* isolate, ThreadId thread_id) const {
435 return isolate_ == isolate && thread_id_.Equals(thread_id);
441 uintptr_t stack_limit_;
442 ThreadState* thread_state_;
445 Simulator* simulator_;
448 PerIsolateThreadData* next_;
449 PerIsolateThreadData* prev_;
451 friend class Isolate;
452 friend class ThreadDataTable;
453 friend class EntryStackItem;
455 DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
460 #define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
461 FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
466 static void InitializeOncePerProcess();
468 // Returns the PerIsolateThreadData for the current thread (or NULL if one is
469 // not currently set).
470 static PerIsolateThreadData* CurrentPerIsolateThreadData() {
471 return reinterpret_cast<PerIsolateThreadData*>(
472 base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
475 // Returns the isolate inside which the current thread is running.
476 INLINE(static Isolate* Current()) {
477 DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
478 Isolate* isolate = reinterpret_cast<Isolate*>(
479 base::Thread::GetExistingThreadLocal(isolate_key_));
480 DCHECK(isolate != NULL);
484 INLINE(static Isolate* UncheckedCurrent()) {
485 DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
486 return reinterpret_cast<Isolate*>(
487 base::Thread::GetThreadLocal(isolate_key_));
490 // Like UncheckedCurrent, but skips the check that |isolate_key_| was
491 // initialized. Callers have to ensure that themselves.
492 INLINE(static Isolate* UnsafeCurrent()) {
493 return reinterpret_cast<Isolate*>(
494 base::Thread::GetThreadLocal(isolate_key_));
497 // Usually called by Init(), but can be called early e.g. to allow
498 // testing components that require logging but not the whole
501 // Safe to call more than once.
502 void InitializeLoggingAndCounters();
504 bool Init(Deserializer* des);
506 // True if at least one thread Enter'ed this isolate.
507 bool IsInUse() { return entry_stack_ != NULL; }
509 // Destroys the non-default isolates.
510 // Sets default isolate into "has_been_disposed" state rather then destroying,
511 // for legacy API reasons.
514 static void GlobalTearDown();
516 void ClearSerializerData();
518 // Find the PerThread for this particular (isolate, thread) combination
519 // If one does not yet exist, return null.
520 PerIsolateThreadData* FindPerThreadDataForThisThread();
522 // Find the PerThread for given (isolate, thread) combination
523 // If one does not yet exist, return null.
524 PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
526 // Returns the key used to store the pointer to the current isolate.
527 // Used internally for V8 threads that do not execute JavaScript but still
528 // are part of the domain of an isolate (like the context switcher).
529 static base::Thread::LocalStorageKey isolate_key() {
533 // Returns the key used to store process-wide thread IDs.
534 static base::Thread::LocalStorageKey thread_id_key() {
535 return thread_id_key_;
538 static base::Thread::LocalStorageKey per_isolate_thread_data_key();
540 // Mutex for serializing access to break control structures.
541 base::RecursiveMutex* break_access() { return &break_access_; }
543 Address get_address_from_id(AddressId id);
545 // Access to top context (where the current function object was created).
546 Context* context() { return thread_local_top_.context_; }
547 inline void set_context(Context* context);
548 Context** context_address() { return &thread_local_top_.context_; }
550 THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
552 // Access to current thread id.
553 THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
555 // Interface to pending exception.
556 inline Object* pending_exception();
557 inline void set_pending_exception(Object* exception_obj);
558 inline void clear_pending_exception();
560 THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
562 inline bool has_pending_exception();
564 THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
565 THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code)
566 THREAD_LOCAL_TOP_ADDRESS(intptr_t, pending_handler_offset)
567 THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
568 THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
570 THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
572 v8::TryCatch* try_catch_handler() {
573 return thread_local_top_.try_catch_handler();
575 bool* external_caught_exception_address() {
576 return &thread_local_top_.external_caught_exception_;
579 THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)
581 inline void clear_pending_message();
582 Address pending_message_obj_address() {
583 return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
586 inline Object* scheduled_exception();
587 inline bool has_scheduled_exception();
588 inline void clear_scheduled_exception();
590 bool IsJavaScriptHandlerOnTop(Object* exception);
591 bool IsExternalHandlerOnTop(Object* exception);
593 inline bool is_catchable_by_javascript(Object* exception);
595 // JS execution stack (see frames.h).
596 static Address c_entry_fp(ThreadLocalTop* thread) {
597 return thread->c_entry_fp_;
599 static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
600 Address c_function() { return thread_local_top_.c_function_; }
602 inline Address* c_entry_fp_address() {
603 return &thread_local_top_.c_entry_fp_;
605 inline Address* handler_address() { return &thread_local_top_.handler_; }
606 inline Address* c_function_address() {
607 return &thread_local_top_.c_function_;
611 Address js_entry_sp() {
612 return thread_local_top_.js_entry_sp_;
614 inline Address* js_entry_sp_address() {
615 return &thread_local_top_.js_entry_sp_;
618 // Returns the global object of the current context. It could be
619 // a builtin object, or a JS global object.
620 inline Handle<GlobalObject> global_object();
622 // Returns the global proxy object of the current context.
623 JSObject* global_proxy() {
624 return context()->global_proxy();
627 static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
628 void FreeThreadResources() { thread_local_top_.Free(); }
630 // This method is called by the api after operations that may throw
631 // exceptions. If an exception was thrown and not handled by an external
632 // handler the exception is scheduled to be rethrown when we return to running
633 // JavaScript code. If an exception is scheduled true is returned.
634 bool OptionalRescheduleException(bool is_bottom_call);
636 // Push and pop a promise and the current try-catch handler.
637 void PushPromise(Handle<JSObject> promise, Handle<JSFunction> function);
639 Handle<Object> GetPromiseOnStackOnThrow();
641 class ExceptionScope {
643 // Scope currently can only be used for regular exceptions,
644 // not termination exception.
645 inline explicit ExceptionScope(Isolate* isolate);
646 inline ~ExceptionScope();
650 Handle<Object> pending_exception_;
653 void SetCaptureStackTraceForUncaughtExceptions(
656 StackTrace::StackTraceOptions options);
658 enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
659 void PrintCurrentStackTrace(FILE* out);
660 void PrintStack(StringStream* accumulator,
661 PrintStackMode mode = kPrintStackVerbose);
662 void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
663 Handle<String> StackTraceString();
664 NO_INLINE(void PushStackTraceAndDie(unsigned int magic, void* ptr1,
665 void* ptr2, unsigned int magic2));
666 Handle<JSArray> CaptureCurrentStackTrace(
668 StackTrace::StackTraceOptions options);
669 Handle<Object> CaptureSimpleStackTrace(Handle<JSObject> error_object,
670 Handle<Object> caller);
671 MaybeHandle<JSObject> CaptureAndSetDetailedStackTrace(
672 Handle<JSObject> error_object);
673 MaybeHandle<JSObject> CaptureAndSetSimpleStackTrace(
674 Handle<JSObject> error_object, Handle<Object> caller);
675 Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
676 Handle<JSArray> GetDetailedFromSimpleStackTrace(
677 Handle<JSObject> error_object);
679 // Returns if the top context may access the given global object. If
680 // the result is false, the pending exception is guaranteed to be
683 bool MayAccess(Handle<JSObject> receiver);
684 bool IsInternallyUsedPropertyName(Handle<Object> name);
685 bool IsInternallyUsedPropertyName(Object* name);
687 void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
688 void ReportFailedAccessCheck(Handle<JSObject> receiver);
690 // Exception throwing support. The caller should use the result
691 // of Throw() as its return value.
692 Object* Throw(Object* exception, MessageLocation* location = NULL);
693 Object* ThrowIllegalOperation();
695 template <typename T>
696 MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
697 MessageLocation* location = NULL) {
698 Throw(*exception, location);
699 return MaybeHandle<T>();
702 // Re-throw an exception. This involves no error reporting since error
703 // reporting was handled when the exception was thrown originally.
704 Object* ReThrow(Object* exception);
706 // Find the correct handler for the current pending exception. This also
707 // clears and returns the current pending exception.
708 Object* UnwindAndFindHandler();
710 // Tries to predict whether an exception will be caught. Note that this can
711 // only produce an estimate, because it is undecidable whether a finally
712 // clause will consume or re-throw an exception. We conservatively assume any
713 // finally clause will behave as if the exception were consumed.
714 enum CatchType { NOT_CAUGHT, CAUGHT_BY_JAVASCRIPT, CAUGHT_BY_EXTERNAL };
715 CatchType PredictExceptionCatcher();
717 void ScheduleThrow(Object* exception);
718 // Re-set pending message, script and positions reported to the TryCatch
719 // back to the TLS for re-use when rethrowing.
720 void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
721 // Un-schedule an exception that was caught by a TryCatch handler.
722 void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
723 void ReportPendingMessages();
724 // Return pending location if any or unfilled structure.
725 MessageLocation GetMessageLocation();
727 // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
728 Object* PromoteScheduledException();
730 // Attempts to compute the current source location, storing the
731 // result in the target out parameter.
732 bool ComputeLocation(MessageLocation* target);
733 bool ComputeLocationFromException(MessageLocation* target,
734 Handle<Object> exception);
735 bool ComputeLocationFromStackTrace(MessageLocation* target,
736 Handle<Object> exception);
738 Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
739 MessageLocation* location);
741 // Out of resource exception helpers.
742 Object* StackOverflow();
743 Object* TerminateExecution();
744 void CancelTerminateExecution();
746 void RequestInterrupt(InterruptCallback callback, void* data);
747 void InvokeApiInterruptCallbacks();
750 void Iterate(ObjectVisitor* v);
751 void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
752 char* Iterate(ObjectVisitor* v, char* t);
753 void IterateThread(ThreadVisitor* v, char* t);
755 // Returns the current native context.
756 Handle<Context> native_context();
758 // Returns the native context of the calling JavaScript code. That
759 // is, the native context of the top-most JavaScript frame.
760 Handle<Context> GetCallingNativeContext();
762 void RegisterTryCatchHandler(v8::TryCatch* that);
763 void UnregisterTryCatchHandler(v8::TryCatch* that);
765 char* ArchiveThread(char* to);
766 char* RestoreThread(char* from);
768 static const char* const kStackOverflowMessage;
770 static const int kUC16AlphabetSize = 256; // See StringSearchBase.
771 static const int kBMMaxShift = 250; // See StringSearchBase.
774 #define GLOBAL_ACCESSOR(type, name, initialvalue) \
775 inline type name() const { \
776 DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
779 inline void set_##name(type value) { \
780 DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
783 ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
784 #undef GLOBAL_ACCESSOR
786 #define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
787 inline type* name() { \
788 DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
789 return &(name##_)[0]; \
791 ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
792 #undef GLOBAL_ARRAY_ACCESSOR
794 #define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
795 inline Handle<type> name(); \
796 inline bool is_##name(type* value);
797 NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
798 #undef NATIVE_CONTEXT_FIELD_ACCESSOR
800 Bootstrapper* bootstrapper() { return bootstrapper_; }
801 Counters* counters() {
802 // Call InitializeLoggingAndCounters() if logging is needed before
803 // the isolate is fully initialized.
804 DCHECK(counters_ != NULL);
807 CodeRange* code_range() { return code_range_; }
808 RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
809 CompilationCache* compilation_cache() { return compilation_cache_; }
811 // Call InitializeLoggingAndCounters() if logging is needed before
812 // the isolate is fully initialized.
813 DCHECK(logger_ != NULL);
816 StackGuard* stack_guard() { return &stack_guard_; }
817 Heap* heap() { return &heap_; }
818 StatsTable* stats_table();
819 StubCache* stub_cache() { return stub_cache_; }
820 CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
821 DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
822 ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
823 MaterializedObjectStore* materialized_object_store() {
824 return materialized_object_store_;
827 MemoryAllocator* memory_allocator() {
828 return memory_allocator_;
831 KeyedLookupCache* keyed_lookup_cache() {
832 return keyed_lookup_cache_;
835 ContextSlotCache* context_slot_cache() {
836 return context_slot_cache_;
839 DescriptorLookupCache* descriptor_lookup_cache() {
840 return descriptor_lookup_cache_;
843 HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
845 HandleScopeImplementer* handle_scope_implementer() {
846 DCHECK(handle_scope_implementer_);
847 return handle_scope_implementer_;
849 Zone* runtime_zone() { return &runtime_zone_; }
850 Zone* interface_descriptor_zone() { return &interface_descriptor_zone_; }
852 UnicodeCache* unicode_cache() {
853 return unicode_cache_;
856 InnerPointerToCodeCache* inner_pointer_to_code_cache() {
857 return inner_pointer_to_code_cache_;
860 GlobalHandles* global_handles() { return global_handles_; }
862 EternalHandles* eternal_handles() { return eternal_handles_; }
864 ThreadManager* thread_manager() { return thread_manager_; }
866 unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
867 return &jsregexp_uncanonicalize_;
870 unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
871 return &jsregexp_canonrange_;
874 RuntimeState* runtime_state() { return &runtime_state_; }
876 Builtins* builtins() { return &builtins_; }
878 void NotifyExtensionInstalled() {
879 has_installed_extensions_ = true;
882 bool has_installed_extensions() { return has_installed_extensions_; }
884 unibrow::Mapping<unibrow::Ecma262Canonicalize>*
885 regexp_macro_assembler_canonicalize() {
886 return ®exp_macro_assembler_canonicalize_;
889 RegExpStack* regexp_stack() { return regexp_stack_; }
891 unibrow::Mapping<unibrow::Ecma262Canonicalize>*
892 interp_canonicalize_mapping() {
893 return &interp_canonicalize_mapping_;
896 Debug* debug() { return debug_; }
898 CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
899 HeapProfiler* heap_profiler() const { return heap_profiler_; }
902 HistogramInfo* heap_histograms() { return heap_histograms_; }
904 JSObject::SpillInformation* js_spill_information() {
905 return &js_spill_information_;
909 Factory* factory() { return reinterpret_cast<Factory*>(this); }
911 static const int kJSRegexpStaticOffsetsVectorSize = 128;
913 THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
915 THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
917 void SetData(uint32_t slot, void* data) {
918 DCHECK(slot < Internals::kNumIsolateDataSlots);
919 embedder_data_[slot] = data;
921 void* GetData(uint32_t slot) {
922 DCHECK(slot < Internals::kNumIsolateDataSlots);
923 return embedder_data_[slot];
926 bool serializer_enabled() const { return serializer_enabled_; }
927 bool snapshot_available() const {
928 return snapshot_blob_ != NULL && snapshot_blob_->raw_size != 0;
931 bool IsDead() { return has_fatal_error_; }
932 void SignalFatalError() { has_fatal_error_ = true; }
934 bool use_crankshaft() const;
936 bool initialized_from_snapshot() { return initialized_from_snapshot_; }
938 double time_millis_since_init() {
939 return base::OS::TimeCurrentMillis() - time_millis_at_init_;
942 DateCache* date_cache() {
946 void set_date_cache(DateCache* date_cache) {
947 if (date_cache != date_cache_) {
950 date_cache_ = date_cache;
953 ErrorToStringHelper* error_tostring_helper() {
954 return &error_tostring_helper_;
957 Map* get_initial_js_array_map(ElementsKind kind,
958 Strength strength = Strength::WEAK);
960 static const int kArrayProtectorValid = 1;
961 static const int kArrayProtectorInvalid = 0;
963 bool IsFastArrayConstructorPrototypeChainIntact();
965 // On intent to set an element in object, make sure that appropriate
966 // notifications occur if the set is on the elements of the array or
967 // object prototype. Also ensure that changes to prototype chain between
968 // Array and Object fire notifications.
969 void UpdateArrayProtectorOnSetElement(Handle<JSObject> object);
970 void UpdateArrayProtectorOnSetLength(Handle<JSObject> object) {
971 UpdateArrayProtectorOnSetElement(object);
973 void UpdateArrayProtectorOnSetPrototype(Handle<JSObject> object) {
974 UpdateArrayProtectorOnSetElement(object);
976 void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
977 UpdateArrayProtectorOnSetElement(object);
980 // Returns true if array is the initial array prototype in any native context.
981 bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
983 CallInterfaceDescriptorData* call_descriptor_data(int index);
985 void IterateDeferredHandles(ObjectVisitor* visitor);
986 void LinkDeferredHandles(DeferredHandles* deferred_handles);
987 void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
990 bool IsDeferredHandle(Object** location);
993 bool concurrent_recompilation_enabled() {
994 // Thread is only available with flag enabled.
995 DCHECK(optimizing_compile_dispatcher_ == NULL ||
996 FLAG_concurrent_recompilation);
997 return optimizing_compile_dispatcher_ != NULL;
1000 bool concurrent_osr_enabled() const {
1001 // Thread is only available with flag enabled.
1002 DCHECK(optimizing_compile_dispatcher_ == NULL ||
1003 FLAG_concurrent_recompilation);
1004 return optimizing_compile_dispatcher_ != NULL && FLAG_concurrent_osr;
1007 OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
1008 return optimizing_compile_dispatcher_;
// Numeric id of this isolate, truncated from the id_ counter.
1011 int id() const { return static_cast<int>(id_); }
// Lazily created statistics/tracing helpers for the optimizing compilers.
1013 HStatistics* GetHStatistics();
1014 CompilationStatistics* GetTurboStatistics();
1015 HTracer* GetHTracer();
1016 CodeTracer* GetCodeTracer();
1018 void DumpAndResetCompilationStats();
// Embedder-settable hook invoked on function entry.
1020 FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
1021 void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
1022 function_entry_hook_ = function_entry_hook;
// Raw addresses of isolate-owned slots, handed out so generated code can
// reference them directly.
1025 void* stress_deopt_count_address() { return &stress_deopt_count_; }
1027 void* vector_store_virtual_register_address() {
1028 return &vector_store_virtual_register_;
// Lazily initialized per-isolate random number generator.
1031 base::RandomNumberGenerator* random_number_generator();
1033 // Given an address occupied by a live code object, return that object.
1034 Object* FindCodeObject(Address a);
// Hands out the next optimization id, wrapping the counter back to 0 once
// it would no longer fit in a Smi.
1036 int NextOptimizationId() {
1037 int id = next_optimization_id_++;
1038 if (!Smi::IsValid(next_optimization_id_)) {
1039 next_optimization_id_ = 0;
1044 // Get (and lazily initialize) the registry for per-isolate symbols.
1045 Handle<JSObject> GetSymbolRegistry();
// Callbacks fired when an API call completes (see call_completed_callbacks_).
1047 void AddCallCompletedCallback(CallCompletedCallback callback);
1048 void RemoveCallCompletedCallback(CallCompletedCallback callback);
1049 void FireCallCompletedCallback();
// Promise-rejection reporting hook for embedders.
1051 void SetPromiseRejectCallback(PromiseRejectCallback callback);
1052 void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
1053 v8::PromiseRejectEvent event);
// Microtask queue management.
1055 void EnqueueMicrotask(Handle<Object> microtask);
1056 void RunMicrotasks();
// Use-counter plumbing (see v8::Isolate::UseCounterCallback in include/v8.h).
1058 void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
1059 void CountUsage(v8::Isolate::UseCounterFeature feature);
1061 BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
1062 BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }
1064 std::string GetTurboCfgFileName();
// Monotonically increasing id handed to SharedFunctionInfos.
1067 int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
// Tracks contexts detached from their global object; verified after GC.
1071 void AddDetachedContext(Handle<Context> context);
1072 void CheckDetachedContextsAfterGC();
1074 List<Object*>* partial_snapshot_cache() { return &partial_snapshot_cache_; }
// Embedder-supplied allocator used for ArrayBuffer backing stores.
1076 void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
1077 array_buffer_allocator_ = allocator;
1079 v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
1080 return array_buffer_allocator_;
1083 FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
// Registration of cancelable background tasks (stored in cancelable_tasks_).
1085 void RegisterCancelableTask(Cancelable* task);
1086 void RemoveCancelableTask(Cancelable* task);
1088 interpreter::Interpreter* interpreter() const { return interpreter_; }
// Private constructor: isolates are created through the public v8::Isolate
// API, not directly.
1091 explicit Isolate(bool enable_serializer);
1094 friend struct GlobalState;
1095 friend struct InitializeGlobalState;
// Helper for GetSymbolRegistry(): installs a single sub-registry object.
1096 Handle<JSObject> SetUpSubregistry(Handle<JSObject> registry, Handle<Map> map,
1099 // These fields are accessed through the API, offsets must be kept in sync
1100 // with v8::internal::Internals (in include/v8.h) constants. This is also
1101 // verified in Isolate::Init() using runtime checks.
1102 void* embedder_data_[Internals::kNumIsolateDataSlots];
1105 // The per-process lock should be acquired before the ThreadDataTable is
// Maps (isolate, thread id) pairs to their PerIsolateThreadData. The
// single list_ member suggests a linked-list representation — confirm.
1107 class ThreadDataTable {
1112 PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
1113 void Insert(PerIsolateThreadData* data);
1114 void Remove(PerIsolateThreadData* data);
// Removes every entry belonging to |isolate|.
1115 void RemoveAllThreads(Isolate* isolate);
1118 PerIsolateThreadData* list_;
1121 // These items form a stack synchronously with threads Enter'ing and Exit'ing
1122 // the Isolate. The top of the stack points to a thread which is currently
1123 // running the Isolate. When the stack is empty, the Isolate is considered
1124 // not entered by any thread and can be Disposed.
1125 // If the same thread enters the Isolate more than once, the entry_count_
1126 // is incremented rather than a new item pushed to the stack.
1127 class EntryStackItem {
1129 EntryStackItem(PerIsolateThreadData* previous_thread_data,
1130 Isolate* previous_isolate,
1131 EntryStackItem* previous_item)
1133 previous_thread_data(previous_thread_data),
1134 previous_isolate(previous_isolate),
1135 previous_item(previous_item) { }
// Snapshot of the state to restore when this entry is popped on Exit.
1138 PerIsolateThreadData* previous_thread_data;
1139 Isolate* previous_isolate;
1140 EntryStackItem* previous_item;
1143 DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
// Guards the process-wide thread_data_table_ below.
1146 static base::LazyMutex thread_data_table_mutex_;
// Process-wide TLS keys for the current-isolate / current-thread lookup.
1148 static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
1149 static base::Thread::LocalStorageKey isolate_key_;
1150 static base::Thread::LocalStorageKey thread_id_key_;
1151 static ThreadDataTable* thread_data_table_;
1153 // A global counter for all generated Isolates, might overflow.
1154 static base::Atomic32 isolate_counter_;
1157 static base::Atomic32 isolate_key_created_;
// Installs |isolate| and |data| into the thread-local slots above.
1162 static void SetIsolateThreadLocals(Isolate* isolate,
1163 PerIsolateThreadData* data);
1165 // Find the PerThread for this particular (isolate, thread) combination.
1166 // If one does not yet exist, allocate a new one.
1167 PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
1169 // Initializes the current thread to run this Isolate.
1170 // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
1171 // at the same time, this should be prevented using external locking.
1174 // Exits the current thread. The previously entered Isolate is restored
1176 // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
1177 // at the same time, this should be prevented using external locking.
1180 void InitializeThreadLocal();
// GC hooks invoked around a mark-compact collection.
1182 void MarkCompactPrologue(bool is_compacting,
1183 ThreadLocalTop* archived_thread_data);
1184 void MarkCompactEpilogue(bool is_compacting,
1185 ThreadLocalTop* archived_thread_data);
1189 // Propagate pending exception message to the v8::TryCatch.
1190 // If there is no external try-catch or message was successfully propagated,
1191 // then return true.
1192 bool PropagatePendingExceptionToExternalTryCatch();
1194 // Remove per-frame stored materialized objects when we are unwinding
// the stack.
1196 void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);
1198 // Traverse prototype chain to find out whether the object is derived from
1199 // the Error object.
1200 bool IsErrorObject(Handle<Object> obj);
// ---- Private state: subsystem singletons and caches owned by the isolate.
1203 EntryStackItem* entry_stack_;
1204 int stack_trace_nesting_level_;
1205 StringStream* incomplete_message_;
1206 Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
1207 Bootstrapper* bootstrapper_;
1208 RuntimeProfiler* runtime_profiler_;
1209 CompilationCache* compilation_cache_;
1210 Counters* counters_;
1211 CodeRange* code_range_;
// Recursive mutex guarding break access; locked via ExecutionAccess.
1212 base::RecursiveMutex break_access_;
1214 StackGuard stack_guard_;
1215 StatsTable* stats_table_;
1216 StubCache* stub_cache_;
1217 CodeAgingHelper* code_aging_helper_;
1218 DeoptimizerData* deoptimizer_data_;
1219 MaterializedObjectStore* materialized_object_store_;
1220 ThreadLocalTop thread_local_top_;
// Configuration for stack-trace capture on uncaught exceptions.
1221 bool capture_stack_trace_for_uncaught_exceptions_;
1222 int stack_trace_for_uncaught_exceptions_frame_limit_;
1223 StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
1224 MemoryAllocator* memory_allocator_;
1225 KeyedLookupCache* keyed_lookup_cache_;
1226 ContextSlotCache* context_slot_cache_;
1227 DescriptorLookupCache* descriptor_lookup_cache_;
1228 HandleScopeData handle_scope_data_;
1229 HandleScopeImplementer* handle_scope_implementer_;
1230 UnicodeCache* unicode_cache_;
1232 Zone interface_descriptor_zone_;
1233 InnerPointerToCodeCache* inner_pointer_to_code_cache_;
1234 GlobalHandles* global_handles_;
1235 EternalHandles* eternal_handles_;
1236 ThreadManager* thread_manager_;
1237 RuntimeState runtime_state_;
1238 bool has_installed_extensions_;
// Unicode canonicalization tables used by the regexp machinery.
1240 unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
1241 unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
1242 unibrow::Mapping<unibrow::Ecma262Canonicalize>
1243 regexp_macro_assembler_canonicalize_;
1244 RegExpStack* regexp_stack_;
1245 DateCache* date_cache_;
1246 ErrorToStringHelper error_tostring_helper_;
1247 unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
1248 CallInterfaceDescriptorData* call_descriptor_data_;
1249 base::RandomNumberGenerator* random_number_generator_;
1251 // Whether the isolate has been created for snapshotting.
1252 bool serializer_enabled_;
1254 // True if fatal error has been signaled for this isolate.
1255 bool has_fatal_error_;
1257 // True if this isolate was initialized from a snapshot.
1258 bool initialized_from_snapshot_;
1260 // Time stamp at initialization.
1261 double time_millis_at_init_;
1264 // A static array of histogram info for each type.
1265 HistogramInfo heap_histograms_[LAST_TYPE + 1];
1266 JSObject::SpillInformation js_spill_information_;
1270 CpuProfiler* cpu_profiler_;
1271 HeapProfiler* heap_profiler_;
1272 FunctionEntryHook function_entry_hook_;
1274 interpreter::Interpreter* interpreter_;
// Pending API interrupts: each entry pairs a callback with its opaque data.
1276 typedef std::pair<InterruptCallback, void*> InterruptEntry;
1277 std::queue<InterruptEntry> api_interrupts_queue_;
// Expands ISOLATE_INIT_LIST to declare one backing-store member per
// (type, name, initialvalue) tuple; presumably `type name##_;` — the macro
// body continues on the next (elided) line.
1279 #define GLOBAL_BACKING_STORE(type, name, initialvalue) \
1281 ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
1282 #undef GLOBAL_BACKING_STORE
// Expands ISOLATE_INIT_ARRAY_LIST to declare fixed-size array members.
1284 #define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
1285 type name##_[length];
1286 ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
1287 #undef GLOBAL_ARRAY_BACKING_STORE
1290 // This class is huge and has a number of fields controlled by
1291 // preprocessor defines. Make sure the offsets of these fields agree
1292 // between compilation units.
// Declares one offset constant per isolate field for that verification.
1293 #define ISOLATE_FIELD_OFFSET(type, name, ignored) \
1294 static const intptr_t name##_debug_offset_;
1295 ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
1296 ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
1297 #undef ISOLATE_FIELD_OFFSET
// Head of the DeferredHandles list managed by Link/UnlinkDeferredHandles.
1300 DeferredHandles* deferred_handles_head_;
1301 OptimizingCompileDispatcher* optimizing_compile_dispatcher_;
1303 // Counts deopt points if deopt_every_n_times is enabled.
1304 unsigned int stress_deopt_count_;
1306 Address vector_store_virtual_register_;
// Counter backing NextOptimizationId(); wraps before leaving Smi range.
1308 int next_optimization_id_;
// Counter backing GetNextUniqueSharedFunctionInfoId().
1311 int next_unique_sfi_id_;
1314 // List of callbacks when a Call completes.
1315 List<CallCompletedCallback> call_completed_callbacks_;
1317 v8::Isolate::UseCounterCallback use_counter_callback_;
1318 BasicBlockProfiler* basic_block_profiler_;
1320 List<Object*> partial_snapshot_cache_;
1322 v8::ArrayBuffer::Allocator* array_buffer_allocator_;
1324 FutexWaitListNode futex_wait_list_node_;
// Tasks registered via Register/RemoveCancelableTask.
1326 std::set<Cancelable*> cancelable_tasks_;
// These classes need direct access to Isolate internals.
1328 friend class ExecutionAccess;
1329 friend class HandleScopeImplementer;
1330 friend class OptimizingCompileDispatcher;
1331 friend class SweeperThread;
1332 friend class ThreadManager;
1333 friend class Simulator;
1334 friend class StackGuard;
1335 friend class ThreadId;
1336 friend class TestMemoryAllocatorScope;
1337 friend class TestCodeRangeScope;
1338 friend class v8::Isolate;
1339 friend class v8::Locker;
1340 friend class v8::Unlocker;
1341 friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
1343 DISALLOW_COPY_AND_ASSIGN(Isolate);
// The accessor-generating macros are scoped to the Isolate declaration.
1347 #undef FIELD_ACCESSOR
1348 #undef THREAD_LOCAL_TOP_ACCESSOR
// Node in a linked stack pairing a promise with the function handling it;
// prev() walks toward the bottom of the stack.
1351 class PromiseOnStack {
1353 PromiseOnStack(Handle<JSFunction> function, Handle<JSObject> promise,
1354 PromiseOnStack* prev)
1355 : function_(function), promise_(promise), prev_(prev) {}
1356 Handle<JSFunction> function() { return function_; }
1357 Handle<JSObject> promise() { return promise_; }
1358 PromiseOnStack* prev() { return prev_; }
1361 Handle<JSFunction> function_;
1362 Handle<JSObject> promise_;
1363 PromiseOnStack* prev_;
1367 // If the GCC version is 4.1.x or 4.2.x an additional field is added to the
1368 // class as a work around for a bug in the generated code found with these
1369 // versions of GCC. See V8 issue 122 for details.
// Scope object saving the isolate's current context; presumably restored
// by the (out-of-line) destructor — confirm in isolate.cc.
1370 class SaveContext BASE_EMBEDDED {
1372 explicit SaveContext(Isolate* isolate);
1375 Handle<Context> context() { return context_; }
1376 SaveContext* prev() { return prev_; }
1378 // Returns true if this save context is below a given JavaScript frame.
1379 bool IsBelowFrame(JavaScriptFrame* frame) {
1380 return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
1385 Handle<Context> context_;
// C entry frame pointer recorded at construction; 0 means none.
1387 Address c_entry_fp_;
// Scope asserting that the isolate's context is unchanged between
// construction and destruction. Two constructor variants appear below;
// presumably selected by a DEBUG preprocessor conditional — confirm.
1391 class AssertNoContextChange BASE_EMBEDDED {
1394 explicit AssertNoContextChange(Isolate* isolate);
1395 ~AssertNoContextChange() {
1396 DCHECK(isolate_->context() == *context_);
1401 Handle<Context> context_;
// Variant with a no-op constructor (no checking performed).
1404 explicit AssertNoContextChange(Isolate* isolate) { }
// RAII guard around the isolate's break_access() recursive mutex; the
// destructor unlocks, and the constructor presumably locks — confirm.
1409 class ExecutionAccess BASE_EMBEDDED {
1411 explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
1414 ~ExecutionAccess() { Unlock(isolate_); }
// Static lock helpers usable without constructing a scope.
1416 static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
1417 static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
1419 static bool TryLock(Isolate* isolate) {
1420 return isolate->break_access()->TryLock();
1428 // Support for checking for stack-overflows.
1429 class StackLimitCheck BASE_EMBEDDED {
1431 explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
1433 // Use this to check for stack-overflows in C++ code.
1434 bool HasOverflowed() const {
1435 StackGuard* stack_guard = isolate_->stack_guard();
1436 return GetCurrentStackPosition() < stack_guard->real_climit();
1439 // Use this to check for interrupt request in C++ code.
// NOTE(review): climit() differs from real_climit() above; presumably it
// is lowered to signal pending interrupts — confirm in StackGuard.
1440 bool InterruptRequested() {
1441 StackGuard* stack_guard = isolate_->stack_guard();
1442 return GetCurrentStackPosition() < stack_guard->climit();
1445 // Use this to check for stack-overflow when entering runtime from JS code.
1446 bool JsHasOverflowed(uintptr_t gap = 0) const;
1453 // Support for temporarily postponing interrupts. When the outermost
1454 // postpone scope is left the interrupts will be re-enabled and any
1455 // interrupts that occurred while in the scope will be taken into
// account.
1457 class PostponeInterruptsScope BASE_EMBEDDED {
1459 PostponeInterruptsScope(Isolate* isolate,
1460 int intercept_mask = StackGuard::ALL_INTERRUPTS)
1461 : stack_guard_(isolate->stack_guard()),
1462 intercept_mask_(intercept_mask),
1463 intercepted_flags_(0) {
// Scopes are pushed/popped on the stack guard, forming a nesting stack.
1464 stack_guard_->PushPostponeInterruptsScope(this);
1467 ~PostponeInterruptsScope() {
1468 stack_guard_->PopPostponeInterruptsScope();
1471 // Find the bottom-most scope that intercepts this interrupt.
1472 // Return whether the interrupt has been intercepted.
1473 bool Intercept(StackGuard::InterruptFlag flag);
1476 StackGuard* stack_guard_;
// Which interrupt bits this scope intercepts, and which it has seen.
1477 int intercept_mask_;
1478 int intercepted_flags_;
1479 PostponeInterruptsScope* prev_;
1481 friend class StackGuard;
// Redirects code tracing output into a per-isolate file when
// FLAG_redirect_code_traces is set (see ShouldRedirect below).
1485 class CodeTracer final : public Malloced {
1487 explicit CodeTracer(int isolate_id)
1490 if (!ShouldRedirect()) {
// No explicit target: build a default file name embedding the process id.
1495 if (FLAG_redirect_code_traces_to == NULL) {
1498 base::OS::GetCurrentProcessId(),
1501 StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
// Zero-length write; presumably truncates/creates the file up front so
// later appends ("ab" below) start from a clean file — confirm WriteChars.
1504 WriteChars(filename_.start(), "", 0, false);
// RAII helper keeping the trace file open for the scope's lifetime.
1509 explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
1510 ~Scope() { tracer_->CloseFile(); }
1512 FILE* file() const { return tracer_->file(); }
1515 CodeTracer* tracer_;
1519 if (!ShouldRedirect()) {
// Lazily open the file in append mode on first use.
1523 if (file_ == NULL) {
1524 file_ = base::OS::FOpen(filename_.start(), "ab");
1531 if (!ShouldRedirect()) {
// Reference-counted close: only the last scope actually closes the file.
1535 if (--scope_depth_ == 0) {
1541 FILE* file() const { return file_; }
1544 static bool ShouldRedirect() {
1545 return FLAG_redirect_code_traces;
1548 EmbeddedVector<char, 128> filename_;
1553 } // namespace internal
1556 #endif // V8_ISOLATE_H_