Merge remote-tracking branch 'origin/v0.10'
[platform/upstream/nodejs.git] / deps / v8 / src / isolate.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include <stdlib.h>
29
30 #include "v8.h"
31
32 #include "ast.h"
33 #include "bootstrapper.h"
34 #include "codegen.h"
35 #include "compilation-cache.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "heap-profiler.h"
39 #include "hydrogen.h"
40 #include "isolate.h"
41 #include "lithium-allocator.h"
42 #include "log.h"
43 #include "marking-thread.h"
44 #include "messages.h"
45 #include "platform.h"
46 #include "regexp-stack.h"
47 #include "runtime-profiler.h"
48 #include "scopeinfo.h"
49 #include "serialize.h"
50 #include "simulator.h"
51 #include "spaces.h"
52 #include "stub-cache.h"
53 #include "sweeper-thread.h"
54 #include "version.h"
55 #include "vm-state-inl.h"
56
57
58 namespace v8 {
59 namespace internal {
60
// Monotonically increasing counter backing ThreadId allocation; updated
// atomically so ids are unique across all threads in the process.
Atomic32 ThreadId::highest_thread_id_ = 0;
62
63 int ThreadId::AllocateThreadId() {
64   int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
65   return new_id;
66 }
67
68
69 int ThreadId::GetCurrentThreadId() {
70   int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
71   if (thread_id == 0) {
72     thread_id = AllocateThreadId();
73     Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
74   }
75   return thread_id;
76 }
77
78
// Constructs a fully reset ThreadLocalTop.  ignore_out_of_memory_ is set
// here rather than in InitializeInternal() so later re-initialization does
// not clobber it.
ThreadLocalTop::ThreadLocalTop() {
  InitializeInternal();
  // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
  // before an isolate is initialized. The initialize methods below do
  // not touch it to preserve its value.
  ignore_out_of_memory_ = false;
}
86
87
// Resets every field to its neutral value.  Shared by the constructor and
// by Initialize(); deliberately does not touch ignore_out_of_memory_.
void ThreadLocalTop::InitializeInternal() {
  c_entry_fp_ = 0;
  handler_ = 0;
#ifdef USE_SIMULATOR
  simulator_ = NULL;
#endif
  js_entry_sp_ = NULL;
  external_callback_ = NULL;
  current_vm_state_ = EXTERNAL;
  try_catch_handler_address_ = NULL;
  context_ = NULL;
  thread_id_ = ThreadId::Invalid();
  external_caught_exception_ = false;
  failed_access_check_callback_ = NULL;
  save_context_ = NULL;
  catcher_ = NULL;
  top_lookup_result_ = NULL;

  // These members are re-initialized later after deserialization
  // is complete.
  pending_exception_ = NULL;
  has_pending_message_ = false;
  pending_message_obj_ = NULL;
  pending_message_script_ = NULL;
  scheduled_exception_ = NULL;
}
114
115
116 void ThreadLocalTop::Initialize() {
117   InitializeInternal();
118 #ifdef USE_SIMULATOR
119 #ifdef V8_TARGET_ARCH_ARM
120   simulator_ = Simulator::current(isolate_);
121 #elif V8_TARGET_ARCH_MIPS
122   simulator_ = Simulator::current(isolate_);
123 #endif
124 #endif
125   thread_id_ = ThreadId::Current();
126 }
127
128
// Decodes the stored handler address back into a v8::TryCatch pointer
// (the address may have been adjusted for the simulator's JS stack).
v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
}
132
133
134 int SystemThreadManager::NumberOfParallelSystemThreads(
135     ParallelSystemComponent type) {
136   int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads);
137   ASSERT(number_of_threads > 0);
138   if (number_of_threads ==  1) {
139     return 0;
140   }
141   if (type == PARALLEL_SWEEPING) {
142     return number_of_threads;
143   } else if (type == CONCURRENT_SWEEPING) {
144     return number_of_threads - 1;
145   } else if (type == PARALLEL_MARKING) {
146     return number_of_threads;
147   }
148   return 1;
149 }
150
151
152 // Create a dummy thread that will wait forever on a semaphore. The only
153 // purpose for this thread is to have some stack area to save essential data
154 // into for use by a stacks only core dump (aka minidump).
155 class PreallocatedMemoryThread: public Thread {
156  public:
157   char* data() {
158     if (data_ready_semaphore_ != NULL) {
159       // Initial access is guarded until the data has been published.
160       data_ready_semaphore_->Wait();
161       delete data_ready_semaphore_;
162       data_ready_semaphore_ = NULL;
163     }
164     return data_;
165   }
166
167   unsigned length() {
168     if (data_ready_semaphore_ != NULL) {
169       // Initial access is guarded until the data has been published.
170       data_ready_semaphore_->Wait();
171       delete data_ready_semaphore_;
172       data_ready_semaphore_ = NULL;
173     }
174     return length_;
175   }
176
177   // Stop the PreallocatedMemoryThread and release its resources.
178   void StopThread() {
179     keep_running_ = false;
180     wait_for_ever_semaphore_->Signal();
181
182     // Wait for the thread to terminate.
183     Join();
184
185     if (data_ready_semaphore_ != NULL) {
186       delete data_ready_semaphore_;
187       data_ready_semaphore_ = NULL;
188     }
189
190     delete wait_for_ever_semaphore_;
191     wait_for_ever_semaphore_ = NULL;
192   }
193
194  protected:
195   // When the thread starts running it will allocate a fixed number of bytes
196   // on the stack and publish the location of this memory for others to use.
197   void Run() {
198     EmbeddedVector<char, 15 * 1024> local_buffer;
199
200     // Initialize the buffer with a known good value.
201     OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
202                 local_buffer.length());
203
204     // Publish the local buffer and signal its availability.
205     data_ = local_buffer.start();
206     length_ = local_buffer.length();
207     data_ready_semaphore_->Signal();
208
209     while (keep_running_) {
210       // This thread will wait here until the end of time.
211       wait_for_ever_semaphore_->Wait();
212     }
213
214     // Make sure we access the buffer after the wait to remove all possibility
215     // of it being optimized away.
216     OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
217                 local_buffer.length());
218   }
219
220
221  private:
222   PreallocatedMemoryThread()
223       : Thread("v8:PreallocMem"),
224         keep_running_(true),
225         wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
226         data_ready_semaphore_(OS::CreateSemaphore(0)),
227         data_(NULL),
228         length_(0) {
229   }
230
231   // Used to make sure that the thread keeps looping even for spurious wakeups.
232   bool keep_running_;
233
234   // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
235   Semaphore* wait_for_ever_semaphore_;
236   // Semaphore to signal that the data has been initialized.
237   Semaphore* data_ready_semaphore_;
238
239   // Location and size of the preallocated memory block.
240   char* data_;
241   unsigned length_;
242
243   friend class Isolate;
244
245   DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
246 };
247
248
249 void Isolate::PreallocatedMemoryThreadStart() {
250   if (preallocated_memory_thread_ != NULL) return;
251   preallocated_memory_thread_ = new PreallocatedMemoryThread();
252   preallocated_memory_thread_->Start();
253 }
254
255
256 void Isolate::PreallocatedMemoryThreadStop() {
257   if (preallocated_memory_thread_ == NULL) return;
258   preallocated_memory_thread_->StopThread();
259   // Done with the thread entirely.
260   delete preallocated_memory_thread_;
261   preallocated_memory_thread_ = NULL;
262 }
263
264
// Seeds the preallocated-storage pool: allocates one block of |size| bytes
// and makes it the sole entry of the circular free list.  Must be called at
// most once, before any PreallocatedStorageNew() call.
void Isolate::PreallocatedStorageInit(size_t size) {
  ASSERT(free_list_.next_ == &free_list_);
  ASSERT(free_list_.previous_ == &free_list_);
  PreallocatedStorage* free_chunk =
      reinterpret_cast<PreallocatedStorage*>(new char[size]);
  free_list_.next_ = free_list_.previous_ = free_chunk;
  free_chunk->next_ = free_chunk->previous_ = &free_list_;
  // The chunk header itself occupies the front of the allocation.
  free_chunk->size_ = size - sizeof(PreallocatedStorage);
  preallocated_storage_preallocated_ = true;
}
275
276
// Allocates |size| bytes from the preallocated pool: exact fit first, then
// first fit with a split.  Falls back to the regular allocator when the
// pool was never initialized.  Returns NULL on exhaustion (asserts first in
// debug builds).
void* Isolate::PreallocatedStorageNew(size_t size) {
  if (!preallocated_storage_preallocated_) {
    return FreeStoreAllocationPolicy().New(size);
  }
  ASSERT(free_list_.next_ != &free_list_);
  ASSERT(free_list_.previous_ != &free_list_);

  // Round the request up to pointer alignment.
  size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
  // Search for exact fit.
  for (PreallocatedStorage* storage = free_list_.next_;
       storage != &free_list_;
       storage = storage->next_) {
    if (storage->size_ == size) {
      storage->Unlink();
      storage->LinkTo(&in_use_list_);
      // User data starts immediately after the chunk header.
      return reinterpret_cast<void*>(storage + 1);
    }
  }
  // Search for first fit.
  for (PreallocatedStorage* storage = free_list_.next_;
       storage != &free_list_;
       storage = storage->next_) {
    // Need room for the request plus a header for the leftover chunk.
    if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
      storage->Unlink();
      storage->LinkTo(&in_use_list_);
      // Split off the tail as a new free-list entry.
      PreallocatedStorage* left_over =
          reinterpret_cast<PreallocatedStorage*>(
              reinterpret_cast<char*>(storage + 1) + size);
      left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
      ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
             storage->size_);
      storage->size_ = size;
      left_over->LinkTo(&free_list_);
      return reinterpret_cast<void*>(storage + 1);
    }
  }
  // Allocation failure.
  ASSERT(false);
  return NULL;
}
317
318
// We don't attempt to coalesce.
// Returns a block previously obtained from PreallocatedStorageNew() to the
// free list (or to the regular allocator when the pool is not in use).
// Passing NULL is a no-op.
void Isolate::PreallocatedStorageDelete(void* p) {
  if (p == NULL) {
    return;
  }
  if (!preallocated_storage_preallocated_) {
    FreeStoreAllocationPolicy::Delete(p);
    return;
  }
  // Step back over the header that precedes the user data.
  PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
  // Sanity-check list integrity before relinking.
  ASSERT(storage->next_->previous_ == storage);
  ASSERT(storage->previous_->next_ == storage);
  storage->Unlink();
  storage->LinkTo(&free_list_);
}
334
// Process-wide state shared by all isolates.  default_isolate_ and the TLS
// keys are created lazily by EnsureDefaultIsolate(), guarded by
// process_wide_mutex_.
Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
// Guards default-isolate creation and the thread data table.
Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
// Monotonically increasing id source for new isolates.
Atomic32 Isolate::isolate_counter_ = 0;
342
// Creates and registers the PerIsolateThreadData for (this, thread_id).
// The caller must guarantee that no entry for this pair exists yet.
Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
    ThreadId thread_id) {
  ASSERT(!thread_id.Equals(ThreadId::Invalid()));
  PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
  {
    // NOTE(review): FindOrAllocatePerThreadDataForThisThread() calls this
    // while already holding process_wide_mutex_, so the mutex is presumably
    // recursive -- verify against OS::CreateMutex().
    ScopedLock lock(process_wide_mutex_);
    ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
    thread_data_table_->Insert(per_thread);
    ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
  }
  return per_thread;
}
355
356
357 Isolate::PerIsolateThreadData*
358     Isolate::FindOrAllocatePerThreadDataForThisThread() {
359   ThreadId thread_id = ThreadId::Current();
360   PerIsolateThreadData* per_thread = NULL;
361   {
362     ScopedLock lock(process_wide_mutex_);
363     per_thread = thread_data_table_->Lookup(this, thread_id);
364     if (per_thread == NULL) {
365       per_thread = AllocatePerIsolateThreadData(thread_id);
366     }
367   }
368   return per_thread;
369 }
370
371
372 Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
373   ThreadId thread_id = ThreadId::Current();
374   return FindPerThreadDataForThread(thread_id);
375 }
376
377
// Looks up the PerIsolateThreadData for (this, thread_id) under the
// process-wide lock.  Returns NULL if no such entry exists.
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
    ThreadId thread_id) {
  PerIsolateThreadData* per_thread = NULL;
  {
    ScopedLock lock(process_wide_mutex_);
    per_thread = thread_data_table_->Lookup(this, thread_id);
  }
  return per_thread;
}
387
388
// Lazily creates the process-wide default isolate along with the TLS keys
// and the thread data table, then binds the calling thread to the default
// isolate if the thread has no isolate yet.  Safe to call repeatedly; all
// work is done under the process-wide lock.
void Isolate::EnsureDefaultIsolate() {
  ScopedLock lock(process_wide_mutex_);
  if (default_isolate_ == NULL) {
    // The TLS keys must exist before the Isolate constructor runs.
    isolate_key_ = Thread::CreateThreadLocalKey();
    thread_id_key_ = Thread::CreateThreadLocalKey();
    per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
    thread_data_table_ = new Isolate::ThreadDataTable();
    default_isolate_ = new Isolate();
  }
  // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
  // because a non-null thread data may be already set.
  if (Thread::GetThreadLocal(isolate_key_) == NULL) {
    Thread::SetThreadLocal(isolate_key_, default_isolate_);
  }
}
404
// Ensures the default isolate exists before main() by running
// EnsureDefaultIsolate() during static initialization of this object.
struct StaticInitializer {
  StaticInitializer() {
    Isolate::EnsureDefaultIsolate();
  }
} static_initializer;
410
#ifdef ENABLE_DEBUGGER_SUPPORT
// Returns the debugger of the default isolate, creating the default
// isolate first if necessary.
Debugger* Isolate::GetDefaultIsolateDebugger() {
  EnsureDefaultIsolate();
  return default_isolate_->debugger();
}
#endif
417
418
// Returns the stack guard of the default isolate, creating the default
// isolate first if necessary.
StackGuard* Isolate::GetDefaultIsolateStackGuard() {
  EnsureDefaultIsolate();
  return default_isolate_->stack_guard();
}
423
424
// Makes the calling thread run inside the default isolate, entering it if
// the thread currently has no isolate or a different one.
void Isolate::EnterDefaultIsolate() {
  EnsureDefaultIsolate();
  ASSERT(default_isolate_ != NULL);

  PerIsolateThreadData* data = CurrentPerIsolateThreadData();
  // If not yet in default isolate - enter it.
  if (data == NULL || data->isolate() != default_isolate_) {
    default_isolate_->Enter();
  }
}
435
436
// Returns the default isolate as an external v8::Isolate*, creating it
// first if necessary (used by the public Locker API).
v8::Isolate* Isolate::GetDefaultIsolateForLocking() {
  EnsureDefaultIsolate();
  return reinterpret_cast<v8::Isolate*>(default_isolate_);
}
441
442
// Returns the address registered under |id| in the isolate address table.
Address Isolate::get_address_from_id(Isolate::AddressId id) {
  return isolate_addresses_[id];
}
446
447
// Visits the roots of an archived thread whose ThreadLocalTop was copied
// into |thread_storage|; returns the address just past the consumed data.
char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
  Iterate(v, thread);
  return thread_storage + sizeof(ThreadLocalTop);
}
453
454
// Applies |v| to the ThreadLocalTop stored (as raw bytes) at |t|.
void Isolate::IterateThread(ThreadVisitor* v, char* t) {
  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
  v->VisitThread(this, thread);
}
459
460
// Visits all GC roots held by |thread|: pending/scheduled exceptions, the
// pending message and script, the current context, the TryCatch chain,
// the native stack frames and the live lookup results.
void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
  // Visit the roots from the top for a given thread.
  Object* pending;
  // The pending exception can sometimes be a failure.  We can't show
  // that to the GC, which only understands objects.
  if (thread->pending_exception_->ToObject(&pending)) {
    v->VisitPointer(&pending);
    thread->pending_exception_ = pending;  // In case GC updated it.
  }
  v->VisitPointer(&(thread->pending_message_obj_));
  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
  Object* scheduled;
  // Same failure-vs-object caveat as for the pending exception above.
  if (thread->scheduled_exception_->ToObject(&scheduled)) {
    v->VisitPointer(&scheduled);
    thread->scheduled_exception_ = scheduled;
  }

  // Walk the chain of registered TryCatch handlers and visit the objects
  // each one holds on to.
  for (v8::TryCatch* block = thread->TryCatchHandler();
       block != NULL;
       block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
    v->VisitPointer(BitCast<Object**>(&(block->message_)));
  }

  // Iterate over pointers on native execution stack.
  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
    it.frame()->Iterate(v);
  }

  // Iterate pointers in live lookup results.
  thread->top_lookup_result_->Iterate(v);
}
494
495
496 void Isolate::Iterate(ObjectVisitor* v) {
497   ThreadLocalTop* current_t = thread_local_top();
498   Iterate(v, current_t);
499 }
500
501 void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
502   for (DeferredHandles* deferred = deferred_handles_head_;
503        deferred != NULL;
504        deferred = deferred->next_) {
505     deferred->Iterate(visitor);
506   }
507 }
508
509
#ifdef DEBUG
// Debug-only: true when |handle| lies inside any deferred-handles block.
bool Isolate::IsDeferredHandle(Object** handle) {
  // Each DeferredHandles instance keeps the handles to one job in the
  // parallel recompilation queue, containing a list of blocks.  Each block
  // contains kHandleBlockSize handles except for the first block, which may
  // not be fully filled.
  // We iterate through all the blocks to see whether the argument handle
  // belongs to one of the blocks.  If so, it is deferred.
  for (DeferredHandles* deferred = deferred_handles_head_;
       deferred != NULL;
       deferred = deferred->next_) {
    List<Object**>* blocks = &deferred->blocks_;
    for (int i = 0; i < blocks->length(); i++) {
      // Block 0 may be partially filled, so its limit is stored explicitly.
      Object** block_limit = (i == 0) ? deferred->first_block_limit_
                                      : blocks->at(i) + kHandleBlockSize;
      if (blocks->at(i) <= handle && handle < block_limit) return true;
    }
  }
  return false;
}
#endif  // DEBUG
531
532
// Pushes |that| onto the thread's chain of TryCatch handlers.
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
  // The ARM simulator has a separate JS stack.  We therefore register
  // the C++ try catch handler with the simulator and get back an
  // address that can be used for comparisons with addresses into the
  // JS stack.  When running without the simulator, the address
  // returned will be the address of the C++ try catch handler itself.
  Address address = reinterpret_cast<Address>(
      SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
  thread_local_top()->set_try_catch_handler_address(address);
}
543
544
// Pops |that| off the thread's TryCatch chain.  |that| must be the
// innermost registered handler.
void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
  ASSERT(thread_local_top()->TryCatchHandler() == that);
  thread_local_top()->set_try_catch_handler_address(
      reinterpret_cast<Address>(that->next_));
  thread_local_top()->catcher_ = NULL;
  SimulatorStack::UnregisterCTryCatch();
}
552
553
// Returns the current stack trace as a heap string.  Re-entry is tracked
// via stack_trace_nesting_level_: a nested call (fault while printing)
// dumps the partial message and returns the empty string; a third level
// aborts outright.
Handle<String> Isolate::StackTraceString() {
  if (stack_trace_nesting_level_ == 0) {
    stack_trace_nesting_level_++;
    HeapStringAllocator allocator;
    StringStream::ClearMentionedObjectCache();
    StringStream accumulator(&allocator);
    // Expose the in-progress stream so a nested fault can still dump it.
    incomplete_message_ = &accumulator;
    PrintStack(&accumulator);
    Handle<String> stack_trace = accumulator.ToString();
    incomplete_message_ = NULL;
    stack_trace_nesting_level_ = 0;
    return stack_trace;
  } else if (stack_trace_nesting_level_ == 1) {
    stack_trace_nesting_level_++;
    OS::PrintError(
      "\n\nAttempt to print stack while printing stack (double fault)\n");
    OS::PrintError(
      "If you are lucky you may find a partial stack dump on stdout.\n\n");
    incomplete_message_->OutputToStdOut();
    return factory()->empty_string();
  } else {
    OS::Abort();
    // Unreachable
    return factory()->empty_string();
  }
}
580
581
// Prints the two magic markers, the raw |object| and |map| pointers and a
// truncated stack trace to stderr, then aborts the process.
void Isolate::PushStackTraceAndDie(unsigned int magic,
                                   Object* object,
                                   Map* map,
                                   unsigned int magic2) {
  const int kMaxStackTraceSize = 8192;
  Handle<String> trace = StackTraceString();
  uint8_t buffer[kMaxStackTraceSize];
  // Leave room for the terminating NUL.
  int length = Min(kMaxStackTraceSize - 1, trace->length());
  String::WriteToFlat(*trace, buffer, 0, length);
  buffer[length] = '\0';
  // TODO(dcarney): convert buffer to utf8?
  OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
                 magic, magic2,
                 static_cast<void*>(object), static_cast<void*>(map),
                 reinterpret_cast<char*>(buffer));
  OS::Abort();
}
599
600
// Determines whether the given stack frame should be displayed in
// a stack trace.  The caller is the error constructor that asked
// for the stack trace to be collected.  The first time a construct
// call to this function is encountered it is skipped.  The seen_caller
// in/out parameter is used to remember if the caller has been seen
// yet.
static bool IsVisibleInStackTrace(StackFrame* raw_frame,
                                  Object* caller,
                                  bool* seen_caller) {
  // Only display JS frames.
  if (!raw_frame->is_java_script()) return false;
  JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
  Object* raw_fun = frame->function();
  // Not sure when this can happen but skip it just in case.
  if (!raw_fun->IsJSFunction()) return false;
  // The caller's own frame is hidden, and marks the point below which
  // frames become visible.
  if ((raw_fun == caller) && !(*seen_caller)) {
    *seen_caller = true;
    return false;
  }
  // Skip all frames until we've seen the caller.
  if (!(*seen_caller)) return false;
  // Also, skip non-visible built-in functions and any call with the builtins
  // object as receiver, so as to not reveal either the builtins object or
  // an internal function.
  // The --builtins-in-stack-traces command line flag allows including
  // internal call sites in the stack trace for debugging purposes.
  if (!FLAG_builtins_in_stack_traces) {
    JSFunction* fun = JSFunction::cast(raw_fun);
    if (frame->receiver()->IsJSBuiltinsObject() ||
        (fun->IsBuiltin() && !fun->shared()->native())) {
      return false;
    }
  }
  return true;
}
636
637
638 Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
639                                                  Handle<Object> caller,
640                                                  int limit) {
641   limit = Max(limit, 0);  // Ensure that limit is not negative.
642   int initial_size = Min(limit, 10);
643   Handle<FixedArray> elements =
644       factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);
645
646   // If the caller parameter is a function we skip frames until we're
647   // under it before starting to collect.
648   bool seen_caller = !caller->IsJSFunction();
649   // First element is reserved to store the number of non-strict frames.
650   int cursor = 1;
651   int frames_seen = 0;
652   int non_strict_frames = 0;
653   bool encountered_strict_function = false;
654   for (StackFrameIterator iter(this);
655        !iter.done() && frames_seen < limit;
656        iter.Advance()) {
657     StackFrame* raw_frame = iter.frame();
658     if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
659       frames_seen++;
660       JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
661       // Set initial size to the maximum inlining level + 1 for the outermost
662       // function.
663       List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
664       frame->Summarize(&frames);
665       for (int i = frames.length() - 1; i >= 0; i--) {
666         if (cursor + 4 > elements->length()) {
667           int new_capacity = JSObject::NewElementsCapacity(elements->length());
668           Handle<FixedArray> new_elements =
669               factory()->NewFixedArrayWithHoles(new_capacity);
670           for (int i = 0; i < cursor; i++) {
671             new_elements->set(i, elements->get(i));
672           }
673           elements = new_elements;
674         }
675         ASSERT(cursor + 4 <= elements->length());
676
677         Handle<Object> recv = frames[i].receiver();
678         Handle<JSFunction> fun = frames[i].function();
679         Handle<Code> code = frames[i].code();
680         Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
681         // The stack trace API should not expose receivers and function
682         // objects on frames deeper than the top-most one with a strict
683         // mode function.  The number of non-strict frames is stored as
684         // first element in the result array.
685         if (!encountered_strict_function) {
686           if (!fun->shared()->is_classic_mode()) {
687             encountered_strict_function = true;
688           } else {
689             non_strict_frames++;
690           }
691         }
692         elements->set(cursor++, *recv);
693         elements->set(cursor++, *fun);
694         elements->set(cursor++, *code);
695         elements->set(cursor++, *offset);
696       }
697     }
698   }
699   elements->set(0, Smi::FromInt(non_strict_frames));
700   Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
701   result->set_length(Smi::FromInt(cursor));
702   return result;
703 }
704
705
// Attaches a detailed stack trace to |error_object| as a hidden property,
// but only when capturing for uncaught exceptions has been enabled.
void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) {
  if (capture_stack_trace_for_uncaught_exceptions_) {
    // Capture stack trace for a detailed exception message.
    Handle<String> key = factory()->hidden_stack_trace_string();
    Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
        stack_trace_for_uncaught_exceptions_frame_limit_,
        stack_trace_for_uncaught_exceptions_options_);
    JSObject::SetHiddenProperty(error_object, key, stack_trace);
  }
}
716
717
718 Handle<JSArray> Isolate::CaptureCurrentStackTrace(
719     int frame_limit, StackTrace::StackTraceOptions options) {
720   // Ensure no negative values.
721   int limit = Max(frame_limit, 0);
722   Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
723
724   Handle<String> column_key =
725       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
726   Handle<String> line_key =
727       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
728   Handle<String> script_key =
729       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
730   Handle<String> script_name_or_source_url_key =
731       factory()->InternalizeOneByteString(
732           STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
733   Handle<String> function_key =
734       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName"));
735   Handle<String> eval_key =
736       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval"));
737   Handle<String> constructor_key =
738       factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor"));
739
740   StackTraceFrameIterator it(this);
741   int frames_seen = 0;
742   while (!it.done() && (frames_seen < limit)) {
743     JavaScriptFrame* frame = it.frame();
744     // Set initial size to the maximum inlining level + 1 for the outermost
745     // function.
746     List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
747     frame->Summarize(&frames);
748     for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
749       // Create a JSObject to hold the information for the StackFrame.
750       Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
751
752       Handle<JSFunction> fun = frames[i].function();
753       Handle<Script> script(Script::cast(fun->shared()->script()));
754
755       if (options & StackTrace::kLineNumber) {
756         int script_line_offset = script->line_offset()->value();
757         int position = frames[i].code()->SourcePosition(frames[i].pc());
758         int line_number = GetScriptLineNumber(script, position);
759         // line_number is already shifted by the script_line_offset.
760         int relative_line_number = line_number - script_line_offset;
761         if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
762           Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
763           int start = (relative_line_number == 0) ? 0 :
764               Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
765           int column_offset = position - start;
766           if (relative_line_number == 0) {
767             // For the case where the code is on the same line as the script
768             // tag.
769             column_offset += script->column_offset()->value();
770           }
771           CHECK_NOT_EMPTY_HANDLE(
772               this,
773               JSObject::SetLocalPropertyIgnoreAttributes(
774                   stack_frame, column_key,
775                   Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE));
776         }
777         CHECK_NOT_EMPTY_HANDLE(
778             this,
779             JSObject::SetLocalPropertyIgnoreAttributes(
780                 stack_frame, line_key,
781                 Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
782       }
783
784       if (options & StackTrace::kScriptName) {
785         Handle<Object> script_name(script->name(), this);
786         CHECK_NOT_EMPTY_HANDLE(this,
787                                JSObject::SetLocalPropertyIgnoreAttributes(
788                                    stack_frame, script_key, script_name, NONE));
789       }
790
791       if (options & StackTrace::kScriptNameOrSourceURL) {
792         Handle<Object> result = GetScriptNameOrSourceURL(script);
793         CHECK_NOT_EMPTY_HANDLE(this,
794                                JSObject::SetLocalPropertyIgnoreAttributes(
795                                    stack_frame, script_name_or_source_url_key,
796                                    result, NONE));
797       }
798
799       if (options & StackTrace::kFunctionName) {
800         Handle<Object> fun_name(fun->shared()->name(), this);
801         if (!fun_name->BooleanValue()) {
802           fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
803         }
804         CHECK_NOT_EMPTY_HANDLE(this,
805                                JSObject::SetLocalPropertyIgnoreAttributes(
806                                    stack_frame, function_key, fun_name, NONE));
807       }
808
809       if (options & StackTrace::kIsEval) {
810         int type = Smi::cast(script->compilation_type())->value();
811         Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
812             factory()->true_value() : factory()->false_value();
813         CHECK_NOT_EMPTY_HANDLE(this,
814                                JSObject::SetLocalPropertyIgnoreAttributes(
815                                    stack_frame, eval_key, is_eval, NONE));
816       }
817
818       if (options & StackTrace::kIsConstructor) {
819         Handle<Object> is_constructor = (frames[i].is_constructor()) ?
820             factory()->true_value() : factory()->false_value();
821         CHECK_NOT_EMPTY_HANDLE(this,
822                                JSObject::SetLocalPropertyIgnoreAttributes(
823                                    stack_frame, constructor_key,
824                                    is_constructor, NONE));
825       }
826
827       FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
828       frames_seen++;
829     }
830     it.Advance();
831   }
832
833   stack_trace->set_length(Smi::FromInt(frames_seen));
834   return stack_trace;
835 }
836
837
838 void Isolate::PrintStack() {
839   if (stack_trace_nesting_level_ == 0) {
840     stack_trace_nesting_level_++;
841
842     StringAllocator* allocator;
843     if (preallocated_message_space_ == NULL) {
844       allocator = new HeapStringAllocator();
845     } else {
846       allocator = preallocated_message_space_;
847     }
848
849     StringStream::ClearMentionedObjectCache();
850     StringStream accumulator(allocator);
851     incomplete_message_ = &accumulator;
852     PrintStack(&accumulator);
853     accumulator.OutputToStdOut();
854     InitializeLoggingAndCounters();
855     accumulator.Log();
856     incomplete_message_ = NULL;
857     stack_trace_nesting_level_ = 0;
858     if (preallocated_message_space_ == NULL) {
859       // Remove the HeapStringAllocator created above.
860       delete allocator;
861     }
862   } else if (stack_trace_nesting_level_ == 1) {
863     stack_trace_nesting_level_++;
864     OS::PrintError(
865       "\n\nAttempt to print stack while printing stack (double fault)\n");
866     OS::PrintError(
867       "If you are lucky you may find a partial stack dump on stdout.\n\n");
868     incomplete_message_->OutputToStdOut();
869   }
870 }
871
872
873 static void PrintFrames(Isolate* isolate,
874                         StringStream* accumulator,
875                         StackFrame::PrintMode mode) {
876   StackFrameIterator it(isolate);
877   for (int i = 0; !it.done(); it.Advance()) {
878     it.frame()->Print(accumulator, mode, i++);
879   }
880 }
881
882
// Writes a two-part textual stack trace (overview, then per-frame details)
// into |accumulator|.  Emits a placeholder banner when the isolate is not
// initialized, and prints nothing at all when there are no frames.
void Isolate::PrintStack(StringStream* accumulator) {
  if (!IsInitialized()) {
    accumulator->Add(
        "\n==== JS stack trace is not available =======================\n\n");
    accumulator->Add(
        "\n==== Isolate for the thread is not initialized =============\n\n");
    return;
  }
  // The MentionedObjectCache is not GC-proof at the moment.
  AssertNoAllocation nogc;
  ASSERT(StringStream::IsMentionedObjectCacheClear());

  // Avoid printing anything if there are no frames.
  if (c_entry_fp(thread_local_top()) == 0) return;

  accumulator->Add(
      "\n==== JS stack trace =========================================\n\n");
  PrintFrames(this, accumulator, StackFrame::OVERVIEW);

  accumulator->Add(
      "\n==== Details ================================================\n\n");
  PrintFrames(this, accumulator, StackFrame::DETAILS);

  accumulator->PrintMentionedObjectCache();
  accumulator->Add("=====================\n\n");
}
909
910
// Registers the embedder callback invoked whenever a security access check
// on a JSObject fails (see ReportFailedAccessCheck below).
void Isolate::SetFailedAccessCheckCallback(
    v8::FailedAccessCheckCallback callback) {
  thread_local_top()->failed_access_check_callback_ = callback;
}
915
916
// Notifies the embedder, via the registered failed-access-check callback,
// that an access check on |receiver| failed.  No-op when no callback is
// installed or when the receiver's constructor carries no access-check info.
void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
  if (!thread_local_top()->failed_access_check_callback_) return;

  ASSERT(receiver->IsAccessCheckNeeded());
  ASSERT(context());

  // Get the data object from access check info.
  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
  if (!constructor->shared()->IsApiFunction()) return;
  Object* data_obj =
      constructor->shared()->get_api_func_data()->access_check_info();
  if (data_obj == heap_.undefined_value()) return;

  HandleScope scope(this);
  Handle<JSObject> receiver_handle(receiver);
  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
  // The callback runs embedder code; record that we are leaving V8.
  { VMState<EXTERNAL> state(this);
    thread_local_top()->failed_access_check_callback_(
      v8::Utils::ToLocal(receiver_handle),
      type,
      v8::Utils::ToLocal(data));
  }
}
940
941
// Tri-state result of the fast-path access check below: a definitive
// YES/NO, or UNKNOWN when the callback-based check must be consulted.
enum MayAccessDecision {
  YES, NO, UNKNOWN
};
945
946
// Fast-path security check that avoids calling out to the embedder.
// Returns YES/NO when the decision follows from the contexts alone, or
// UNKNOWN when the caller must fall back to the security callback.
static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
                                           JSObject* receiver,
                                           v8::AccessType type) {
  // During bootstrapping, callback functions are not enabled yet.
  if (isolate->bootstrapper()->IsActive()) return YES;

  if (receiver->IsJSGlobalProxy()) {
    Object* receiver_context = JSGlobalProxy::cast(receiver)->native_context();
    // A global proxy detached from its native context denies all access.
    if (!receiver_context->IsContext()) return NO;

    // Get the native context of current top context.
    // avoid using Isolate::native_context() because it uses Handle.
    Context* native_context =
        isolate->context()->global_object()->native_context();
    // Same context: trivially allowed.
    if (receiver_context == native_context) return YES;

    // Matching security tokens also grant access.
    if (Context::cast(receiver_context)->security_token() ==
        native_context->security_token())
      return YES;
  }

  return UNKNOWN;
}
970
971
// Returns true if the current lexical context may perform the given |type|
// of access on the named property |key| of |receiver|.  Tries the cheap
// context-based pre-check first, then dispatches to the embedder-provided
// named security callback.
bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
                             v8::AccessType type) {
  ASSERT(receiver->IsAccessCheckNeeded());

  // The callers of this method are not expecting a GC.
  AssertNoAllocation no_gc;

  // Skip checks for hidden properties access.  Note, we do not
  // require existence of a context in this case.
  if (key == heap_.hidden_string()) return true;

  // Check for compatibility between the security tokens in the
  // current lexical context and the accessed object.
  ASSERT(context());

  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
  if (decision != UNKNOWN) return decision == YES;

  // Get named access check callback
  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
  if (!constructor->shared()->IsApiFunction()) return false;

  Object* data_obj =
     constructor->shared()->get_api_func_data()->access_check_info();
  if (data_obj == heap_.undefined_value()) return false;

  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
  v8::NamedSecurityCallback callback =
      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);

  // No callback registered: deny access.
  if (!callback) return false;

  HandleScope scope(this);
  Handle<JSObject> receiver_handle(receiver, this);
  Handle<Object> key_handle(key, this);
  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
  LOG(this, ApiNamedSecurityCheck(key));
  bool result = false;
  {
    // Leaving JavaScript.
    VMState<EXTERNAL> state(this);
    result = callback(v8::Utils::ToLocal(receiver_handle),
                      v8::Utils::ToLocal(key_handle),
                      type,
                      v8::Utils::ToLocal(data));
  }
  return result;
}
1020
1021
// Returns true if the current lexical context may perform the given |type|
// of access on element |index| of |receiver|.  Mirrors MayNamedAccess but
// dispatches to the embedder's indexed security callback.
bool Isolate::MayIndexedAccess(JSObject* receiver,
                               uint32_t index,
                               v8::AccessType type) {
  ASSERT(receiver->IsAccessCheckNeeded());
  // Check for compatibility between the security tokens in the
  // current lexical context and the accessed object.
  ASSERT(context());

  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
  if (decision != UNKNOWN) return decision == YES;

  // Get indexed access check callback
  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
  if (!constructor->shared()->IsApiFunction()) return false;

  Object* data_obj =
      constructor->shared()->get_api_func_data()->access_check_info();
  if (data_obj == heap_.undefined_value()) return false;

  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
  v8::IndexedSecurityCallback callback =
      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);

  // No callback registered: deny access.
  if (!callback) return false;

  HandleScope scope(this);
  Handle<JSObject> receiver_handle(receiver, this);
  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
  LOG(this, ApiIndexedSecurityCheck(index));
  bool result = false;
  {
    // Leaving JavaScript.
    VMState<EXTERNAL> state(this);
    result = callback(v8::Utils::ToLocal(receiver_handle),
                      index,
                      type,
                      v8::Utils::ToLocal(data));
  }
  return result;
}
1062
1063
// Message text reported to the embedder for an uncaught stack overflow.
const char* const Isolate::kStackOverflowMessage =
  "Uncaught RangeError: Maximum call stack size exceeded";
1066
1067
// Throws the stack-overflow RangeError.  The error cannot be built via its
// JavaScript constructor here (there is no stack left to run it), so a
// pre-constructed boilerplate is copied and a captured stack trace is
// attached as a hidden property.  Always returns Failure::Exception().
Failure* Isolate::StackOverflow() {
  HandleScope scope(this);
  // At this point we cannot create an Error object using its javascript
  // constructor.  Instead, we copy the pre-constructed boilerplate and
  // attach the stack trace as a hidden property.
  Handle<String> key = factory()->stack_overflow_string();
  Handle<JSObject> boilerplate =
      Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
  Handle<JSObject> exception = Copy(boilerplate);
  DoThrow(*exception, NULL);

  // Get stack trace limit.
  Handle<Object> error = GetProperty(js_builtins_object(), "$Error");
  if (!error->IsJSObject()) return Failure::Exception();
  Handle<Object> stack_trace_limit =
      GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit");
  if (!stack_trace_limit->IsNumber()) return Failure::Exception();
  double dlimit = stack_trace_limit->Number();
  // A NaN limit (e.g. Error.stackTraceLimit set to a non-numeric value)
  // disables trace collection.
  int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit);

  Handle<JSArray> stack_trace = CaptureSimpleStackTrace(
      exception, factory()->undefined_value(), limit);
  JSObject::SetHiddenProperty(exception,
                              factory()->hidden_stack_trace_string(),
                              stack_trace);
  return Failure::Exception();
}
1095
1096
// Throws the special termination exception, which is not catchable by
// JavaScript (see is_catchable_by_javascript users in this file).
Failure* Isolate::TerminateExecution() {
  DoThrow(heap_.termination_exception(), NULL);
  return Failure::Exception();
}
1101
1102
// Cancels a previously requested termination: resets the TryCatch
// terminated flag and drops a pending and/or scheduled termination
// exception, leaving other kinds of exceptions untouched.
void Isolate::CancelTerminateExecution() {
  if (try_catch_handler()) {
    try_catch_handler()->has_terminated_ = false;
  }
  if (has_pending_exception() &&
      pending_exception() == heap_.termination_exception()) {
    thread_local_top()->external_caught_exception_ = false;
    clear_pending_exception();
  }
  if (has_scheduled_exception() &&
      scheduled_exception() == heap_.termination_exception()) {
    thread_local_top()->external_caught_exception_ = false;
    clear_scheduled_exception();
  }
}
1118
1119
// Throws |exception| with an optional source |location| and returns the
// failure sentinel that callers must propagate.
Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
  DoThrow(exception, location);
  return Failure::Exception();
}
1124
1125
// Re-throws an exception without re-running error reporting (that happened
// when it was first thrown).  Still recomputes catcher_ so an external
// v8::TryCatch above the top-most JS handler can observe the exception.
Failure* Isolate::ReThrow(MaybeObject* exception) {
  bool can_be_caught_externally = false;
  bool catchable_by_javascript = is_catchable_by_javascript(exception);
  ShouldReportException(&can_be_caught_externally, catchable_by_javascript);

  thread_local_top()->catcher_ = can_be_caught_externally ?
      try_catch_handler() : NULL;

  // Set the exception being re-thrown.
  set_pending_exception(exception);
  if (exception->IsFailure()) return exception->ToFailureUnchecked();
  return Failure::Exception();
}
1139
1140
// Throws the internal "illegal access" error string.
Failure* Isolate::ThrowIllegalOperation() {
  return Throw(heap_.illegal_access_string());
}
1144
1145
// Schedules |exception| to be re-thrown later (typically after an external
// callback returns) while preserving uncaught-error reporting semantics.
void Isolate::ScheduleThrow(Object* exception) {
  // When scheduling a throw we first throw the exception to get the
  // error reporting if it is uncaught before rescheduling it.
  Throw(exception);
  PropagatePendingExceptionToExternalTryCatch();
  if (has_pending_exception()) {
    // Move the pending exception into the scheduled slot.
    thread_local_top()->scheduled_exception_ = pending_exception();
    thread_local_top()->external_caught_exception_ = false;
    clear_pending_exception();
  }
}
1157
1158
// Turns the scheduled exception back into a pending one and returns the
// failure sentinel (counterpart of ScheduleThrow above).
Failure* Isolate::PromoteScheduledException() {
  MaybeObject* thrown = scheduled_exception();
  clear_scheduled_exception();
  // Re-throw the exception to avoid getting repeated error reporting.
  return ReThrow(thrown);
}
1165
1166
// Prints a user-readable JavaScript stack trace for the current thread to
// |out|, one line per frame.  Used e.g. by the abort-on-uncaught-exception
// path in DoThrow.
void Isolate::PrintCurrentStackTrace(FILE* out) {
  StackTraceFrameIterator it(this);
  while (!it.done()) {
    HandleScope scope(this);
    // Find code position if recorded in relocation info.
    JavaScriptFrame* frame = it.frame();
    int pos = frame->LookupCode()->SourcePosition(frame->pc());
    Handle<Object> pos_obj(Smi::FromInt(pos), this);
    // Fetch function and receiver.
    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
    Handle<Object> recv(frame->receiver(), this);
    // Advance to the next JavaScript frame and determine if the
    // current frame is the top-level frame.
    it.Advance();
    Handle<Object> is_top_level = it.done()
        ? factory()->true_value()
        : factory()->false_value();
    // Generate and print stack trace line.
    Handle<String> line =
        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
    if (line->length() > 0) {
      line->PrintOn(out);
      PrintF(out, "\n");
    }
  }
}
1193
1194
// Computes the source location of the top-most JavaScript frame and stores
// it in |target|.  Falls back to an empty-script location with positions
// (-1, -1) when no frame with a usable script source is found.
void Isolate::ComputeLocation(MessageLocation* target) {
  *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
  StackTraceFrameIterator it(this);
  if (!it.done()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* fun = JSFunction::cast(frame->function());
    Object* script = fun->shared()->script();
    if (script->IsScript() &&
        !(Script::cast(script)->source()->IsUndefined())) {
      int pos = frame->LookupCode()->SourcePosition(frame->pc());
      // Compute the location from the function and the reloc info.
      Handle<Script> casted_script(Script::cast(script));
      *target = MessageLocation(casted_script, pos, pos + 1);
    }
  }
}
1211
1212
// Decides whether the exception currently being thrown should be reported.
// Sets |*can_be_caught_externally| to true when a v8::TryCatch sits above
// the top-most JavaScript catch handler on the stack; the return value
// tells the caller whether an error report should be generated.
bool Isolate::ShouldReportException(bool* can_be_caught_externally,
                                    bool catchable_by_javascript) {
  // Find the top-most try-catch handler.
  StackHandler* handler =
      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
  while (handler != NULL && !handler->is_catch()) {
    handler = handler->next();
  }

  // Get the address of the external handler so we can compare the address to
  // determine which one is closer to the top of the stack.
  Address external_handler_address =
      thread_local_top()->try_catch_handler_address();

  // The exception has been externally caught if and only if there is
  // an external handler which is on top of the top-most try-catch
  // handler.
  *can_be_caught_externally = external_handler_address != NULL &&
      (handler == NULL || handler->address() > external_handler_address ||
       !catchable_by_javascript);

  if (*can_be_caught_externally) {
    // Only report the exception if the external handler is verbose.
    return try_catch_handler()->is_verbose_;
  } else {
    // Report the exception if it isn't caught by JavaScript code.
    return handler == NULL;
  }
}
1242
1243
// Returns true if |obj| is an instance of (or inherits from an instance of)
// the builtin $Error constructor, determined by walking the prototype chain.
bool Isolate::IsErrorObject(Handle<Object> obj) {
  if (!obj->IsJSObject()) return false;

  String* error_key =
      *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error")));
  Object* error_constructor =
      js_builtins_object()->GetPropertyNoExceptionThrown(error_key);

  // Walk the prototype chain looking for an object whose map was created by
  // the $Error constructor.
  for (Object* prototype = *obj; !prototype->IsNull();
       prototype = prototype->GetPrototype(this)) {
    if (!prototype->IsJSObject()) return false;
    if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
      return true;
    }
  }
  return false;
}
1261
// Guards against recursive aborts while printing the fatal stack trace on
// the --abort-on-uncaught-exception path below.
static int fatal_exception_depth = 0;

// Central exception-throwing routine: determines catchability, notifies the
// debugger, builds the uncaught-exception message (optionally with a stack
// trace), handles abort-on-uncaught-exception, and finally records the
// pending exception on the thread.  |location| may be NULL, in which case
// a location is computed from the current stack.
void Isolate::DoThrow(Object* exception, MessageLocation* location) {
  ASSERT(!has_pending_exception());

  HandleScope scope(this);
  Handle<Object> exception_handle(exception, this);

  // Determine reporting and whether the exception is caught externally.
  bool catchable_by_javascript = is_catchable_by_javascript(exception);
  bool can_be_caught_externally = false;
  bool should_report_exception =
      ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
  bool report_exception = catchable_by_javascript && should_report_exception;
  bool try_catch_needs_message =
      can_be_caught_externally && try_catch_handler()->capture_message_;
  bool bootstrapping = bootstrapper()->IsActive();

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Notify debugger of exception.
  if (catchable_by_javascript) {
    debugger_->OnException(exception_handle, report_exception);
  }
#endif

  // Generate the message if required.
  if (report_exception || try_catch_needs_message) {
    MessageLocation potential_computed_location;
    if (location == NULL) {
      // If no location was specified we use a computed one instead.
      ComputeLocation(&potential_computed_location);
      location = &potential_computed_location;
    }
    // It's not safe to try to make message objects or collect stack traces
    // while the bootstrapper is active since the infrastructure may not have
    // been properly initialized.
    if (!bootstrapping) {
      Handle<String> stack_trace;
      if (FLAG_trace_exception) stack_trace = StackTraceString();
      Handle<JSArray> stack_trace_object;
      if (capture_stack_trace_for_uncaught_exceptions_) {
        if (IsErrorObject(exception_handle)) {
          // We fetch the stack trace that corresponds to this error object.
          String* key = heap()->hidden_stack_trace_string();
          Object* stack_property =
              JSObject::cast(*exception_handle)->GetHiddenProperty(key);
          // Property lookup may have failed.  In this case it's probably not
          // a valid Error object.
          if (stack_property->IsJSArray()) {
            stack_trace_object = Handle<JSArray>(JSArray::cast(stack_property));
          }
        }
        if (stack_trace_object.is_null()) {
          // Not an error object, we capture at throw site.
          stack_trace_object = CaptureCurrentStackTrace(
              stack_trace_for_uncaught_exceptions_frame_limit_,
              stack_trace_for_uncaught_exceptions_options_);
        }
      }

      Handle<Object> exception_arg = exception_handle;
      // If the exception argument is a custom object, turn it into a string
      // before throwing as uncaught exception.  Note that the pending
      // exception object to be set later must not be turned into a string.
      if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
        bool failed = false;
        exception_arg = Execution::ToDetailString(exception_arg, &failed);
        if (failed) {
          // Stringification itself threw; fall back to a fixed label.
          exception_arg = factory()->InternalizeOneByteString(
              STATIC_ASCII_VECTOR("exception"));
        }
      }
      Handle<Object> message_obj = MessageHandler::MakeMessageObject(
          "uncaught_exception",
          location,
          HandleVector<Object>(&exception_arg, 1),
          stack_trace,
          stack_trace_object);
      thread_local_top()->pending_message_obj_ = *message_obj;
      if (location != NULL) {
        thread_local_top()->pending_message_script_ = *location->script();
        thread_local_top()->pending_message_start_pos_ = location->start_pos();
        thread_local_top()->pending_message_end_pos_ = location->end_pos();
      }

      // If the abort-on-uncaught-exception flag is specified, abort on any
      // exception not caught by JavaScript, even when an external handler is
      // present.  This flag is intended for use by JavaScript developers, so
      // print a user-friendly stack trace (not an internal one).
      if (fatal_exception_depth == 0 &&
          FLAG_abort_on_uncaught_exception &&
          (report_exception || can_be_caught_externally)) {
        fatal_exception_depth++;
        PrintF(stderr,
               "%s\n\nFROM\n",
               *MessageHandler::GetLocalizedMessage(this, message_obj));
        PrintCurrentStackTrace(stderr);
        OS::Abort();
      }
    } else if (location != NULL && !location->script().is_null()) {
      // We are bootstrapping and caught an error where the location is set
      // and we have a script for the location.
      // In this case we could have an extension (or an internal error
      // somewhere) and we print out the line number at which the error occured
      // to the console for easier debugging.
      int line_number = GetScriptLineNumberSafe(location->script(),
                                                location->start_pos());
      if (exception->IsString()) {
        OS::PrintError(
            "Extension or internal compilation error: %s in %s at line %d.\n",
            *String::cast(exception)->ToCString(),
            *String::cast(location->script()->name())->ToCString(),
            line_number + 1);
      } else {
        OS::PrintError(
            "Extension or internal compilation error in %s at line %d.\n",
            *String::cast(location->script()->name())->ToCString(),
            line_number + 1);
      }
    }
  }

  // Save the message for reporting if the the exception remains uncaught.
  thread_local_top()->has_pending_message_ = report_exception;

  // Do not forget to clean catcher_ if currently thrown exception cannot
  // be caught.  If necessary, ReThrow will update the catcher.
  thread_local_top()->catcher_ = can_be_caught_externally ?
      try_catch_handler() : NULL;

  set_pending_exception(*exception_handle);
}
1394
1395
// Returns true if the pending exception will be delivered to an external
// v8::TryCatch rather than to JavaScript, taking intervening try-finally
// handlers on the stack into account.
bool Isolate::IsExternallyCaught() {
  ASSERT(has_pending_exception());

  if ((thread_local_top()->catcher_ == NULL) ||
      (try_catch_handler() != thread_local_top()->catcher_)) {
    // When throwing the exception, we found no v8::TryCatch
    // which should care about this exception.
    return false;
  }

  // Uncatchable-by-JS exceptions (e.g. termination) always reach the
  // external handler.
  if (!is_catchable_by_javascript(pending_exception())) {
    return true;
  }

  // Get the address of the external handler so we can compare the address to
  // determine which one is closer to the top of the stack.
  Address external_handler_address =
      thread_local_top()->try_catch_handler_address();
  ASSERT(external_handler_address != NULL);

  // The exception has been externally caught if and only if there is
  // an external handler which is on top of the top-most try-finally
  // handler.
  // There should be no try-catch blocks as they would prohibit us from
  // finding external catcher in the first place (see catcher_ check above).
  //
  // Note, that finally clause would rethrow an exception unless it's
  // aborted by jumps in control flow like return, break, etc. and we'll
  // have another chances to set proper v8::TryCatch.
  StackHandler* handler =
      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
  while (handler != NULL && handler->address() < external_handler_address) {
    ASSERT(!handler->is_catch());
    if (handler->is_finally()) return false;

    handler = handler->next();
  }

  return true;
}
1436
1437
// Reports the saved pending message (if any) for the pending exception and
// then clears it.  Out-of-memory and termination exceptions are
// special-cased and produce no message report.
void Isolate::ReportPendingMessages() {
  ASSERT(has_pending_exception());
  PropagatePendingExceptionToExternalTryCatch();

  // If the pending exception is OutOfMemoryException set out_of_memory in
  // the native context.  Note: We have to mark the native context here
  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
  // set it.
  HandleScope scope(this);
  if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
    context()->mark_out_of_memory();
  } else if (thread_local_top_.pending_exception_ ==
             heap()->termination_exception()) {
    // Do nothing: if needed, the exception has been already propagated to
    // v8::TryCatch.
  } else {
    if (thread_local_top_.has_pending_message_) {
      thread_local_top_.has_pending_message_ = false;
      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
        // NOTE(review): this inner HandleScope shadows the outer one above;
        // harmless, but redundant.
        HandleScope scope(this);
        Handle<Object> message_obj(thread_local_top_.pending_message_obj_,
                                   this);
        // Report with a source location only when a script was recorded at
        // throw time.
        if (thread_local_top_.pending_message_script_ != NULL) {
          Handle<Script> script(thread_local_top_.pending_message_script_);
          int start_pos = thread_local_top_.pending_message_start_pos_;
          int end_pos = thread_local_top_.pending_message_end_pos_;
          MessageLocation location(script, start_pos, end_pos);
          MessageHandler::ReportMessage(this, &location, message_obj);
        } else {
          MessageHandler::ReportMessage(this, NULL, message_obj);
        }
      }
    }
  }
  clear_pending_message();
}
1474
1475
// Returns the source location of the pending message, or a default-
// constructed MessageLocation when no reportable message (with a script)
// is available for the pending exception.
MessageLocation Isolate::GetMessageLocation() {
  ASSERT(has_pending_exception());

  if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
      thread_local_top_.pending_exception_ != heap()->termination_exception() &&
      thread_local_top_.has_pending_message_ &&
      !thread_local_top_.pending_message_obj_->IsTheHole() &&
      thread_local_top_.pending_message_script_ != NULL) {
    Handle<Script> script(thread_local_top_.pending_message_script_);
    int start_pos = thread_local_top_.pending_message_start_pos_;
    int end_pos = thread_local_top_.pending_message_end_pos_;
    return MessageLocation(script, start_pos, end_pos);
  }

  return MessageLocation();
}
1492
1493
// Toggles the process-wide --trace-exception flag.
void Isolate::TraceException(bool flag) {
  FLAG_trace_exception = flag;  // TODO(isolates): This is an unfortunate use.
}
1497
1498
// Called when unwinding out of V8.  Decides whether the pending exception
// should be cleared (it was caught externally or this is the bottom-most
// entry into V8) or rescheduled for the surrounding V8 entry.  Returns true
// when the exception was rescheduled.
bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
  ASSERT(has_pending_exception());
  PropagatePendingExceptionToExternalTryCatch();

  // Always reschedule out of memory exceptions.
  if (!is_out_of_memory()) {
    bool is_termination_exception =
        pending_exception() == heap_.termination_exception();

    // Do not reschedule the exception if this is the bottom call.
    bool clear_exception = is_bottom_call;

    if (is_termination_exception) {
      if (is_bottom_call) {
        thread_local_top()->external_caught_exception_ = false;
        clear_pending_exception();
        return false;
      }
    } else if (thread_local_top()->external_caught_exception_) {
      // If the exception is externally caught, clear it if there are no
      // JavaScript frames on the way to the C++ frame that has the
      // external handler.
      ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
      Address external_handler_address =
          thread_local_top()->try_catch_handler_address();
      JavaScriptFrameIterator it(this);
      if (it.done() || (it.frame()->sp() > external_handler_address)) {
        clear_exception = true;
      }
    }

    // Clear the exception if needed.
    if (clear_exception) {
      thread_local_top()->external_caught_exception_ = false;
      clear_pending_exception();
      return false;
    }
  }

  // Reschedule the exception.
  thread_local_top()->scheduled_exception_ = pending_exception();
  clear_pending_exception();
  return true;
}
1543
1544
// Configures whether (and how) stack traces are captured automatically when
// an uncaught exception is thrown; the settings are consumed by DoThrow.
void Isolate::SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options) {
  capture_stack_trace_for_uncaught_exceptions_ = capture;
  stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
  stack_trace_for_uncaught_exceptions_options_ = options;
}
1553
1554
1555 bool Isolate::is_out_of_memory() {
1556   if (has_pending_exception()) {
1557     MaybeObject* e = pending_exception();
1558     if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
1559       return true;
1560     }
1561   }
1562   if (has_scheduled_exception()) {
1563     MaybeObject* e = scheduled_exception();
1564     if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
1565       return true;
1566     }
1567   }
1568   return false;
1569 }
1570
1571
// Returns the native context of the current global object, as a Handle.
Handle<Context> Isolate::native_context() {
  return Handle<Context>(context()->global_object()->native_context());
}
1575
1576
// Returns the global context of the current global object, as a Handle.
Handle<Context> Isolate::global_context() {
  return Handle<Context>(context()->global_object()->global_context());
}
1580
1581
// Returns the native context of the inner-most JavaScript frame, skipping
// frames that belong to the debugger's own context when the debugger is
// active.  Returns a null handle when no JavaScript frame is on the stack.
Handle<Context> Isolate::GetCallingNativeContext() {
  JavaScriptFrameIterator it(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  if (debug_->InDebugger()) {
    // Advance past frames running in the debug context.
    while (!it.done()) {
      JavaScriptFrame* frame = it.frame();
      Context* context = Context::cast(frame->context());
      if (context->native_context() == *debug_->debug_context()) {
        it.Advance();
      } else {
        break;
      }
    }
  }
#endif  // ENABLE_DEBUGGER_SUPPORT
  if (it.done()) return Handle<Context>::null();
  JavaScriptFrame* frame = it.frame();
  Context* context = Context::cast(frame->context());
  return Handle<Context>(context->native_context());
}
1602
1603
// Copies this thread's per-thread state into the buffer at |to| (used when
// switching threads) and resets the live state to a clean slate.  Returns
// the position just past the archived data.
char* Isolate::ArchiveThread(char* to) {
  OS::MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
              sizeof(ThreadLocalTop));
  InitializeThreadLocal();
  clear_pending_exception();
  clear_pending_message();
  clear_scheduled_exception();
  return to + sizeof(ThreadLocalTop);
}
1613
1614
// Restores per-thread state previously saved by ArchiveThread.  Returns the
// position just past the consumed data.
char* Isolate::RestoreThread(char* from) {
  OS::MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
              sizeof(ThreadLocalTop));
  // This might be just paranoia, but it seems to be needed in case a
  // thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
  thread_local_top()->simulator_ = Simulator::current(this);
#elif V8_TARGET_ARCH_MIPS
  thread_local_top()->simulator_ = Simulator::current(this);
#endif
#endif
  ASSERT(context() == NULL || context()->IsContext());
  return from + sizeof(ThreadLocalTop);
}
1630
1631
// The table starts out as an empty linked list.
Isolate::ThreadDataTable::ThreadDataTable()
    : list_(NULL) {
}
1635
1636
// Destructor intentionally does not free remaining entries; see below.
Isolate::ThreadDataTable::~ThreadDataTable() {
  // TODO(svenpanne) The assertion below would fire if an embedder does not
  // cleanly dispose all Isolates before disposing v8, so we are conservative
  // and leave it out for now.
  // ASSERT_EQ(NULL, list_);
}
1643
1644
1645 Isolate::PerIsolateThreadData*
1646     Isolate::ThreadDataTable::Lookup(Isolate* isolate,
1647                                      ThreadId thread_id) {
1648   for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
1649     if (data->Matches(isolate, thread_id)) return data;
1650   }
1651   return NULL;
1652 }
1653
1654
1655 void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
1656   if (list_ != NULL) list_->prev_ = data;
1657   data->next_ = list_;
1658   list_ = data;
1659 }
1660
1661
1662 void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
1663   if (list_ == data) list_ = data->next_;
1664   if (data->next_ != NULL) data->next_->prev_ = data->prev_;
1665   if (data->prev_ != NULL) data->prev_->next_ = data->next_;
1666   delete data;
1667 }
1668
1669
1670 void Isolate::ThreadDataTable::Remove(Isolate* isolate,
1671                                       ThreadId thread_id) {
1672   PerIsolateThreadData* data = Lookup(isolate, thread_id);
1673   if (data != NULL) {
1674     Remove(data);
1675   }
1676 }
1677
1678
1679 void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
1680   PerIsolateThreadData* data = list_;
1681   while (data != NULL) {
1682     PerIsolateThreadData* next = data->next_;
1683     if (data->isolate() == isolate) Remove(data);
1684     data = next;
1685   }
1686 }
1687
1688
// TRACE_ISOLATE(tag) prints the isolate's address and id together with a
// life-cycle tag (constructor, init, deinit, ...) when --trace-isolates is
// set. It expands to nothing in release builds.
#ifdef DEBUG
#define TRACE_ISOLATE(tag)                                              \
  do {                                                                  \
    if (FLAG_trace_isolates) {                                          \
      PrintF("Isolate %p (id %d)" #tag "\n",                            \
             reinterpret_cast<void*>(this), id());                      \
    }                                                                   \
  } while (false)
#else
#define TRACE_ISOLATE(tag)
#endif
1700
1701
// Constructs an isolate in the UNINITIALIZED state. Most subsystems are
// only created later in Init(); here pointers are zeroed and the few
// members needed before Init() runs (mutexes, the thread manager) are
// set up.
Isolate::Isolate()
    : state_(UNINITIALIZED),
      embedder_data_(NULL),
      entry_stack_(NULL),
      stack_trace_nesting_level_(0),
      incomplete_message_(NULL),
      preallocated_memory_thread_(NULL),
      preallocated_message_space_(NULL),
      bootstrapper_(NULL),
      runtime_profiler_(NULL),
      compilation_cache_(NULL),
      counters_(NULL),
      code_range_(NULL),
      // Must be initialized early to allow v8::SetResourceConstraints calls.
      break_access_(OS::CreateMutex()),
      debugger_initialized_(false),
      // Must be initialized early to allow v8::Debug calls.
      debugger_access_(OS::CreateMutex()),
      logger_(NULL),
      stats_table_(NULL),
      stub_cache_(NULL),
      deoptimizer_data_(NULL),
      capture_stack_trace_for_uncaught_exceptions_(false),
      stack_trace_for_uncaught_exceptions_frame_limit_(0),
      stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
      transcendental_cache_(NULL),
      memory_allocator_(NULL),
      keyed_lookup_cache_(NULL),
      context_slot_cache_(NULL),
      descriptor_lookup_cache_(NULL),
      handle_scope_implementer_(NULL),
      unicode_cache_(NULL),
      runtime_zone_(this),
      in_use_list_(0),
      free_list_(0),
      preallocated_storage_preallocated_(false),
      inner_pointer_to_code_cache_(NULL),
      write_iterator_(NULL),
      global_handles_(NULL),
      context_switcher_(NULL),
      thread_manager_(NULL),
      fp_stubs_generated_(false),
      has_installed_extensions_(false),
      string_tracker_(NULL),
      regexp_stack_(NULL),
      date_cache_(NULL),
      code_stub_interface_descriptors_(NULL),
      context_exit_happened_(false),
      cpu_profiler_(NULL),
      heap_profiler_(NULL),
      deferred_handles_head_(NULL),
      optimizing_compiler_thread_(this),
      marking_thread_(NULL),
      sweeper_thread_(NULL) {
  // Assign a process-unique id via an atomic counter.
  id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
  TRACE_ISOLATE(constructor);

  memset(isolate_addresses_, 0,
      sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));

  heap_.isolate_ = this;
  stack_guard_.isolate_ = this;

  // ThreadManager is initialized early to support locking an isolate
  // before it is entered.
  thread_manager_ = new ThreadManager();
  thread_manager_->isolate_ = this;

  // Simulator state is only present when targeting ARM/MIPS from a
  // different host architecture.
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
    defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
  simulator_initialized_ = false;
  simulator_i_cache_ = NULL;
  simulator_redirection_ = NULL;
#endif

#ifdef DEBUG
  // heap_histograms_ initializes itself.
  memset(&js_spill_information_, 0, sizeof(js_spill_information_));
  memset(code_kind_statistics_, 0,
         sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);

  compiler_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW;
  execution_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW;
#endif

#ifdef ENABLE_DEBUGGER_SUPPORT
  debug_ = NULL;
  debugger_ = NULL;
#endif

  handle_scope_data_.Initialize();

  // Apply the per-field default values declared in ISOLATE_INIT_LIST.
#define ISOLATE_INIT_EXECUTE(type, name, initial_value)                        \
  name##_ = (initial_value);
  ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
#undef ISOLATE_INIT_EXECUTE

  // Zero-fill the array-valued fields declared in ISOLATE_INIT_ARRAY_LIST.
#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length)                         \
  memset(name##_, 0, sizeof(type) * length);
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
#undef ISOLATE_INIT_ARRAY_EXECUTE
}
1804
1805
// Shuts this isolate down. For non-default isolates the Isolate object is
// deleted here, so callers must not touch the pointer afterwards. The
// calling thread's previously current isolate and thread data are restored
// before returning.
void Isolate::TearDown() {
  TRACE_ISOLATE(tear_down);

  // Temporarily set this isolate as current so that various parts of
  // the isolate can access it in their destructors without having a
  // direct pointer. We don't use Enter/Exit here to avoid
  // initializing the thread data.
  PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
  Isolate* saved_isolate = UncheckedCurrent();
  SetIsolateThreadLocals(this, NULL);

  Deinit();

  // Drop every per-thread entry that still references this isolate.
  { ScopedLock lock(process_wide_mutex_);
    thread_data_table_->RemoveAllThreads(this);
  }

  if (serialize_partial_snapshot_cache_ != NULL) {
    delete[] serialize_partial_snapshot_cache_;
    serialize_partial_snapshot_cache_ = NULL;
  }

  // The default isolate is kept alive because it is re-initializable.
  if (!IsDefaultIsolate()) {
    delete this;
  }

  // Restore the previous current isolate.
  SetIsolateThreadLocals(saved_isolate, saved_data);
}
1835
1836
1837 void Isolate::GlobalTearDown() {
1838   delete thread_data_table_;
1839 }
1840
1841
// Reverses Init(): stops helper threads, tears down the heap, builtins,
// profilers and the logger, and returns the isolate to UNINITIALIZED.
// Does nothing if the isolate never finished Init().
void Isolate::Deinit() {
  if (state_ == INITIALIZED) {
    TRACE_ISOLATE(deinit);

    if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();

    // Join and delete the sweeper threads started in Init().
    if (FLAG_sweeper_threads > 0) {
      for (int i = 0; i < FLAG_sweeper_threads; i++) {
        sweeper_thread_[i]->Stop();
        delete sweeper_thread_[i];
      }
      delete[] sweeper_thread_;
    }

    // Join and delete the marking threads started in Init().
    if (FLAG_marking_threads > 0) {
      for (int i = 0; i < FLAG_marking_threads; i++) {
        marking_thread_[i]->Stop();
        delete marking_thread_[i];
      }
      delete[] marking_thread_;
    }

    if (FLAG_hydrogen_stats) GetHStatistics()->Print();

    // We must stop the logger before we tear down other components.
    Sampler* sampler = logger_->sampler();
    if (sampler && sampler->IsActive()) sampler->Stop();

    delete deoptimizer_data_;
    deoptimizer_data_ = NULL;
    if (FLAG_preemption) {
      v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
      v8::Locker::StopPreemption();
    }
    builtins_.TearDown();
    bootstrapper_->TearDown();

    // Remove the external reference to the preallocated stack memory.
    delete preallocated_message_space_;
    preallocated_message_space_ = NULL;
    PreallocatedMemoryThreadStop();

    if (runtime_profiler_ != NULL) {
      runtime_profiler_->TearDown();
      delete runtime_profiler_;
      runtime_profiler_ = NULL;
    }
    heap_.TearDown();
    logger_->TearDown();

    delete heap_profiler_;
    heap_profiler_ = NULL;
    delete cpu_profiler_;
    cpu_profiler_ = NULL;

    // The default isolate is re-initializable due to legacy API.
    state_ = UNINITIALIZED;
  }
}
1901
1902
1903 void Isolate::PushToPartialSnapshotCache(Object* obj) {
1904   int length = serialize_partial_snapshot_cache_length();
1905   int capacity = serialize_partial_snapshot_cache_capacity();
1906
1907   if (length >= capacity) {
1908     int new_capacity = static_cast<int>((capacity + 10) * 1.2);
1909     Object** new_array = new Object*[new_capacity];
1910     for (int i = 0; i < length; i++) {
1911       new_array[i] = serialize_partial_snapshot_cache()[i];
1912     }
1913     if (capacity != 0) delete[] serialize_partial_snapshot_cache();
1914     set_serialize_partial_snapshot_cache(new_array);
1915     set_serialize_partial_snapshot_cache_capacity(new_capacity);
1916   }
1917
1918   serialize_partial_snapshot_cache()[length] = obj;
1919   set_serialize_partial_snapshot_cache_length(length + 1);
1920 }
1921
1922
// Publishes |isolate| and its per-thread |data| in the calling thread's
// TLS slots; either argument may be NULL to clear the slot.
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data) {
  Thread::SetThreadLocal(isolate_key_, isolate);
  Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
}
1928
1929
// Frees all remaining isolate-owned subsystems. Deinit() must already have
// run. Deletion order matters in places (see inline comments), e.g. the
// zone segment is released while counters_ are still alive.
Isolate::~Isolate() {
  TRACE_ISOLATE(destructor);

  // Has to be called while counters_ are still alive.
  runtime_zone_.DeleteKeptSegment();

  delete[] assembler_spare_buffer_;
  assembler_spare_buffer_ = NULL;

  delete unicode_cache_;
  unicode_cache_ = NULL;

  delete date_cache_;
  date_cache_ = NULL;

  delete[] code_stub_interface_descriptors_;
  code_stub_interface_descriptors_ = NULL;

  delete regexp_stack_;
  regexp_stack_ = NULL;

  delete descriptor_lookup_cache_;
  descriptor_lookup_cache_ = NULL;
  delete context_slot_cache_;
  context_slot_cache_ = NULL;
  delete keyed_lookup_cache_;
  keyed_lookup_cache_ = NULL;

  delete transcendental_cache_;
  transcendental_cache_ = NULL;
  delete stub_cache_;
  stub_cache_ = NULL;
  delete stats_table_;
  stats_table_ = NULL;

  // Logger before counters: the logger was created from counters' data.
  delete logger_;
  logger_ = NULL;

  delete counters_;
  counters_ = NULL;

  delete handle_scope_implementer_;
  handle_scope_implementer_ = NULL;
  delete break_access_;
  break_access_ = NULL;
  delete debugger_access_;
  debugger_access_ = NULL;

  delete compilation_cache_;
  compilation_cache_ = NULL;
  delete bootstrapper_;
  bootstrapper_ = NULL;
  delete inner_pointer_to_code_cache_;
  inner_pointer_to_code_cache_ = NULL;
  delete write_iterator_;
  write_iterator_ = NULL;

  delete context_switcher_;
  context_switcher_ = NULL;
  delete thread_manager_;
  thread_manager_ = NULL;

  delete string_tracker_;
  string_tracker_ = NULL;

  delete memory_allocator_;
  memory_allocator_ = NULL;
  delete code_range_;
  code_range_ = NULL;
  delete global_handles_;
  global_handles_ = NULL;

  delete external_reference_table_;
  external_reference_table_ = NULL;

#ifdef ENABLE_DEBUGGER_SUPPORT
  delete debugger_;
  debugger_ = NULL;
  delete debug_;
  debug_ = NULL;
#endif
}
2012
2013
// Resets thread_local_top_ to a pristine state bound to this isolate.
void Isolate::InitializeThreadLocal() {
  thread_local_top_.isolate_ = this;
  thread_local_top_.Initialize();
}
2018
2019
// Copies the pending exception (and its message, if any) into the
// innermost external v8::TryCatch handler, but only when the exception is
// externally caught. Out-of-memory and termination exceptions receive
// special treatment.
void Isolate::PropagatePendingExceptionToExternalTryCatch() {
  ASSERT(has_pending_exception());

  bool external_caught = IsExternallyCaught();
  thread_local_top_.external_caught_exception_ = external_caught;

  if (!external_caught) return;

  if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
    // Do not propagate OOM exception: we should kill VM asap.
  } else if (thread_local_top_.pending_exception_ ==
             heap()->termination_exception()) {
    try_catch_handler()->can_continue_ = false;
    try_catch_handler()->has_terminated_ = true;
    try_catch_handler()->exception_ = heap()->null_value();
  } else {
    // At this point all non-object (failure) exceptions have
    // been dealt with so this shouldn't fail.
    ASSERT(!pending_exception()->IsFailure());
    try_catch_handler()->can_continue_ = true;
    try_catch_handler()->has_terminated_ = false;
    try_catch_handler()->exception_ = pending_exception();
    // The hole marks "no message scheduled"; only forward a real message.
    if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
      try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
    }
  }
}
2047
2048
2049 void Isolate::InitializeLoggingAndCounters() {
2050   if (logger_ == NULL) {
2051     logger_ = new Logger(this);
2052   }
2053   if (counters_ == NULL) {
2054     counters_ = new Counters(this);
2055   }
2056 }
2057
2058
// Lazily creates the debug and debugger objects. Guarded by
// debugger_access_ so concurrent callers initialize them at most once;
// a no-op when debugger support is compiled out.
void Isolate::InitializeDebugger() {
#ifdef ENABLE_DEBUGGER_SUPPORT
  ScopedLock lock(debugger_access_);
  if (NoBarrier_Load(&debugger_initialized_)) return;
  InitializeLoggingAndCounters();
  debug_ = new Debug(this);
  debugger_ = new Debugger(this);
  // Release store so other threads observe the fully constructed objects.
  Release_Store(&debugger_initialized_, true);
#endif
}
2069
2070
2071 bool Isolate::Init(Deserializer* des) {
2072   ASSERT(state_ != INITIALIZED);
2073   ASSERT(Isolate::Current() == this);
2074   TRACE_ISOLATE(init);
2075
2076   // The initialization process does not handle memory exhaustion.
2077   DisallowAllocationFailure disallow_allocation_failure;
2078
2079   InitializeLoggingAndCounters();
2080
2081   InitializeDebugger();
2082
2083   memory_allocator_ = new MemoryAllocator(this);
2084   code_range_ = new CodeRange(this);
2085
2086   // Safe after setting Heap::isolate_, initializing StackGuard and
2087   // ensuring that Isolate::Current() == this.
2088   heap_.SetStackLimits();
2089
2090 #define ASSIGN_ELEMENT(CamelName, hacker_name)                  \
2091   isolate_addresses_[Isolate::k##CamelName##Address] =          \
2092       reinterpret_cast<Address>(hacker_name##_address());
2093   FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
2094 #undef C
2095
2096   string_tracker_ = new StringTracker();
2097   string_tracker_->isolate_ = this;
2098   compilation_cache_ = new CompilationCache(this);
2099   transcendental_cache_ = new TranscendentalCache();
2100   keyed_lookup_cache_ = new KeyedLookupCache();
2101   context_slot_cache_ = new ContextSlotCache();
2102   descriptor_lookup_cache_ = new DescriptorLookupCache();
2103   unicode_cache_ = new UnicodeCache();
2104   inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
2105   write_iterator_ = new ConsStringIteratorOp();
2106   global_handles_ = new GlobalHandles(this);
2107   bootstrapper_ = new Bootstrapper(this);
2108   handle_scope_implementer_ = new HandleScopeImplementer(this);
2109   stub_cache_ = new StubCache(this, runtime_zone());
2110   regexp_stack_ = new RegExpStack();
2111   regexp_stack_->isolate_ = this;
2112   date_cache_ = new DateCache();
2113   code_stub_interface_descriptors_ =
2114       new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
2115   cpu_profiler_ = new CpuProfiler(this);
2116   heap_profiler_ = new HeapProfiler(heap());
2117
2118   // Enable logging before setting up the heap
2119   logger_->SetUp(this);
2120
2121   // Initialize other runtime facilities
2122 #if defined(USE_SIMULATOR)
2123 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
2124   Simulator::Initialize(this);
2125 #endif
2126 #endif
2127
2128   { // NOLINT
2129     // Ensure that the thread has a valid stack guard.  The v8::Locker object
2130     // will ensure this too, but we don't have to use lockers if we are only
2131     // using one thread.
2132     ExecutionAccess lock(this);
2133     stack_guard_.InitThread(lock);
2134   }
2135
2136   // SetUp the object heap.
2137   ASSERT(!heap_.HasBeenSetUp());
2138   if (!heap_.SetUp()) {
2139     V8::FatalProcessOutOfMemory("heap setup");
2140     return false;
2141   }
2142
2143   deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
2144
2145   const bool create_heap_objects = (des == NULL);
2146   if (create_heap_objects && !heap_.CreateHeapObjects()) {
2147     V8::FatalProcessOutOfMemory("heap object creation");
2148     return false;
2149   }
2150
2151   if (create_heap_objects) {
2152     // Terminate the cache array with the sentinel so we can iterate.
2153     PushToPartialSnapshotCache(heap_.undefined_value());
2154   }
2155
2156   InitializeThreadLocal();
2157
2158   bootstrapper_->Initialize(create_heap_objects);
2159   builtins_.SetUp(create_heap_objects);
2160
2161   // Only preallocate on the first initialization.
2162   if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
2163     // Start the thread which will set aside some memory.
2164     PreallocatedMemoryThreadStart();
2165     preallocated_message_space_ =
2166         new NoAllocationStringAllocator(
2167             preallocated_memory_thread_->data(),
2168             preallocated_memory_thread_->length());
2169     PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
2170   }
2171
2172   if (FLAG_preemption) {
2173     v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
2174     v8::Locker::StartPreemption(100);
2175   }
2176
2177 #ifdef ENABLE_DEBUGGER_SUPPORT
2178   debug_->SetUp(create_heap_objects);
2179 #endif
2180
2181   // If we are deserializing, read the state into the now-empty heap.
2182   if (!create_heap_objects) {
2183     des->Deserialize();
2184   }
2185   stub_cache_->Initialize();
2186
2187   // Finish initialization of ThreadLocal after deserialization is done.
2188   clear_pending_exception();
2189   clear_pending_message();
2190   clear_scheduled_exception();
2191
2192   // Deserializing may put strange things in the root array's copy of the
2193   // stack guard.
2194   heap_.SetStackLimits();
2195
2196   // Quiet the heap NaN if needed on target platform.
2197   if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
2198
2199   runtime_profiler_ = new RuntimeProfiler(this);
2200   runtime_profiler_->SetUp();
2201
2202   // If we are deserializing, log non-function code objects and compiled
2203   // functions found in the snapshot.
2204   if (!create_heap_objects &&
2205       (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
2206     HandleScope scope(this);
2207     LOG(this, LogCodeObjects());
2208     LOG(this, LogCompiledFunctions());
2209   }
2210
2211   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
2212            Internals::kIsolateStateOffset);
2213   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
2214            Internals::kIsolateEmbedderDataOffset);
2215   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
2216            Internals::kIsolateRootsOffset);
2217
2218   state_ = INITIALIZED;
2219   time_millis_at_init_ = OS::TimeCurrentMillis();
2220
2221   if (!create_heap_objects) {
2222     // Now that the heap is consistent, it's OK to generate the code for the
2223     // deopt entry table that might have been referred to by optimized code in
2224     // the snapshot.
2225     HandleScope scope(this);
2226     Deoptimizer::EnsureCodeForDeoptimizationEntry(
2227         this,
2228         Deoptimizer::LAZY,
2229         kDeoptTableSerializeEntryCount - 1);
2230   }
2231
2232   if (!Serializer::enabled()) {
2233     // Ensure that all stubs which need to be generated ahead of time, but
2234     // cannot be serialized into the snapshot have been generated.
2235     HandleScope scope(this);
2236     CodeStub::GenerateFPStubs(this);
2237     StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
2238     StubFailureTrampolineStub::GenerateAheadOfTime(this);
2239     // TODO(mstarzinger): The following is an ugly hack to make sure the
2240     // interface descriptor is initialized even when stubs have been
2241     // deserialized out of the snapshot without the graph builder.
2242     FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
2243                                    DONT_TRACK_ALLOCATION_SITE, 0);
2244     stub.InitializeInterfaceDescriptor(
2245         this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
2246     CompareNilICStub::InitializeForIsolate(this);
2247     ArrayConstructorStubBase::InstallDescriptors(this);
2248   }
2249
2250   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
2251
2252   if (FLAG_parallel_marking && FLAG_marking_threads == 0) {
2253     FLAG_marking_threads = SystemThreadManager::
2254         NumberOfParallelSystemThreads(
2255             SystemThreadManager::PARALLEL_MARKING);
2256   }
2257   if (FLAG_marking_threads > 0) {
2258     marking_thread_ = new MarkingThread*[FLAG_marking_threads];
2259     for (int i = 0; i < FLAG_marking_threads; i++) {
2260       marking_thread_[i] = new MarkingThread(this);
2261       marking_thread_[i]->Start();
2262     }
2263   } else {
2264     FLAG_parallel_marking = false;
2265   }
2266
2267   if (FLAG_sweeper_threads == 0) {
2268     if (FLAG_concurrent_sweeping) {
2269       FLAG_sweeper_threads = SystemThreadManager::
2270           NumberOfParallelSystemThreads(
2271               SystemThreadManager::CONCURRENT_SWEEPING);
2272     } else if (FLAG_parallel_sweeping) {
2273       FLAG_sweeper_threads = SystemThreadManager::
2274           NumberOfParallelSystemThreads(
2275               SystemThreadManager::PARALLEL_SWEEPING);
2276     }
2277   }
2278   if (FLAG_sweeper_threads > 0) {
2279     sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
2280     for (int i = 0; i < FLAG_sweeper_threads; i++) {
2281       sweeper_thread_[i] = new SweeperThread(this);
2282       sweeper_thread_[i]->Start();
2283     }
2284   } else {
2285     FLAG_concurrent_sweeping = false;
2286     FLAG_parallel_sweeping = false;
2287   }
2288   if (FLAG_parallel_recompilation &&
2289       SystemThreadManager::NumberOfParallelSystemThreads(
2290           SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
2291     FLAG_parallel_recompilation = false;
2292   }
2293   return true;
2294 }
2295
2296
2297 // Initialized lazily to allow early
2298 // v8::V8::SetAddHistogramSampleFunction calls.
2299 StatsTable* Isolate::stats_table() {
2300   if (stats_table_ == NULL) {
2301     stats_table_ = new StatsTable;
2302   }
2303   return stats_table_;
2304 }
2305
2306
// Makes this isolate the current one for the calling thread. The isolate
// (if any) that was current before is pushed onto entry_stack_ so Exit()
// can restore it. Re-entry by the same thread only bumps a nesting count.
void Isolate::Enter() {
  Isolate* current_isolate = NULL;
  PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
  if (current_data != NULL) {
    current_isolate = current_data->isolate_;
    ASSERT(current_isolate != NULL);
    if (current_isolate == this) {
      ASSERT(Current() == this);
      ASSERT(entry_stack_ != NULL);
      ASSERT(entry_stack_->previous_thread_data == NULL ||
             entry_stack_->previous_thread_data->thread_id().Equals(
                 ThreadId::Current()));
      // Same thread re-enters the isolate, no need to re-init anything.
      entry_stack_->entry_count++;
      return;
    }
  }

  // Threads can have default isolate set into TLS as Current but not yet have
  // PerIsolateThreadData for it, as it requires more advanced phase of the
  // initialization. For example, a thread might be the one that system used for
  // static initializers - in this case the default isolate is set in TLS but
  // the thread did not yet Enter the isolate. If PerisolateThreadData is not
  // there, use the isolate set in TLS.
  if (current_isolate == NULL) {
    current_isolate = Isolate::UncheckedCurrent();
  }

  PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
  ASSERT(data != NULL);
  ASSERT(data->isolate_ == this);

  // Remember what was current before so Exit() can restore it.
  EntryStackItem* item = new EntryStackItem(current_data,
                                            current_isolate,
                                            entry_stack_);
  entry_stack_ = item;

  SetIsolateThreadLocals(this, data);

  // In case it's the first time some thread enters the isolate.
  set_thread_id(data->thread_id());
}
2349
2350
// Undoes the most recent Enter() on the calling thread. Nested entries
// only decrement the count; leaving the outermost entry pops the entry
// stack and restores the previously current isolate and thread data.
void Isolate::Exit() {
  ASSERT(entry_stack_ != NULL);
  ASSERT(entry_stack_->previous_thread_data == NULL ||
         entry_stack_->previous_thread_data->thread_id().Equals(
             ThreadId::Current()));

  if (--entry_stack_->entry_count > 0) return;

  ASSERT(CurrentPerIsolateThreadData() != NULL);
  ASSERT(CurrentPerIsolateThreadData()->isolate_ == this);

  // Pop the stack.
  EntryStackItem* item = entry_stack_;
  entry_stack_ = item->previous_item;

  PerIsolateThreadData* previous_thread_data = item->previous_thread_data;
  Isolate* previous_isolate = item->previous_isolate;

  delete item;

  // Reinit the current thread for the isolate it was running before this one.
  SetIsolateThreadLocals(previous_isolate, previous_thread_data);
}
2374
2375
2376 void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
2377   deferred->next_ = deferred_handles_head_;
2378   if (deferred_handles_head_ != NULL) {
2379     deferred_handles_head_->previous_ = deferred;
2380   }
2381   deferred_handles_head_ = deferred;
2382 }
2383
2384
2385 void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
2386 #ifdef DEBUG
2387   // In debug mode assert that the linked list is well-formed.
2388   DeferredHandles* deferred_iterator = deferred;
2389   while (deferred_iterator->previous_ != NULL) {
2390     deferred_iterator = deferred_iterator->previous_;
2391   }
2392   ASSERT(deferred_handles_head_ == deferred_iterator);
2393 #endif
2394   if (deferred_handles_head_ == deferred) {
2395     deferred_handles_head_ = deferred_handles_head_->next_;
2396   }
2397   if (deferred->next_ != NULL) {
2398     deferred->next_->previous_ = deferred->previous_;
2399   }
2400   if (deferred->previous_ != NULL) {
2401     deferred->previous_->next_ = deferred->next_;
2402   }
2403 }
2404
2405
2406 #ifdef DEBUG
2407 HandleDereferenceGuard::State Isolate::HandleDereferenceGuardState() {
2408   if (execution_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW &&
2409       compiler_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW) {
2410     // Short-cut to avoid polling thread id.
2411     return HandleDereferenceGuard::ALLOW;
2412   }
2413   if (FLAG_parallel_recompilation &&
2414       optimizing_compiler_thread()->IsOptimizerThread()) {
2415     return compiler_thread_handle_deref_state_;
2416   } else {
2417     return execution_thread_handle_deref_state_;
2418   }
2419 }
2420
2421
2422 void Isolate::SetHandleDereferenceGuardState(
2423     HandleDereferenceGuard::State state) {
2424   if (FLAG_parallel_recompilation &&
2425       optimizing_compiler_thread()->IsOptimizerThread()) {
2426     compiler_thread_handle_deref_state_ = state;
2427   } else {
2428     execution_thread_handle_deref_state_ = state;
2429   }
2430 }
2431 #endif
2432
2433
2434 HStatistics* Isolate::GetHStatistics() {
2435   if (hstatistics() == NULL) set_hstatistics(new HStatistics());
2436   return hstatistics();
2437 }
2438
2439
2440 HTracer* Isolate::GetHTracer() {
2441   if (htracer() == NULL) set_htracer(new HTracer(id()));
2442   return htracer();
2443 }
2444
2445
2446 CodeStubInterfaceDescriptor*
2447     Isolate::code_stub_interface_descriptor(int index) {
2448   return code_stub_interface_descriptors_ + index;
2449 }
2450
2451
#ifdef DEBUG
// For every isolate field declared in the init lists, define a constant
// holding the field's byte offset within Isolate (debug builds only).
#define ISOLATE_FIELD_OFFSET(type, name, ignored)                       \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
2459
2460 } }  // namespace v8::internal